{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stdlib utilities: text codecs plus functools/random helpers\n",
    "import codecs\n",
    "import functools\n",
    "import random\n",
    "\n",
    "# Pin this notebook to GPU 1; must be set before TensorFlow initialises CUDA\n",
    "import os\n",
    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n",
    "import time\n",
    "import zipfile\n",
    "#import readdata as rd\n",
    "from vocab import Vocab  # project-local vocabulary wrapper\n",
    "from tree import Tree    # project-local tree structure\n",
    "import numpy as np\n",
    "import pickle\n",
    "import tensorflow as tf\n",
    "import tensorflow_fold as td\n",
    "import sys\n",
    "from dbOneModelV9 import select  # project-local DB reader; presumably select(table, offset, count) -- see training loop\n",
    "import gc\n",
    "#sys.setrecursionlimit(10000000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the vocabulary and register an out-of-vocabulary token.\n",
    "vocab = Vocab('/home/x/mydisk/NewestTreeLSTMData/vocabulary/vocabulary.txt')\n",
    "vocab.add('<unk>')\n",
    "# Uniform random init of the embedding table: 36378 rows x 50 dims.\n",
    "# NOTE(review): 36378 is presumably the vocabulary size incl. <unk> -- TODO confirm against vocab.\n",
    "weight_matrix = np.random.uniform(-0.05,0.05,(36378,50)).astype(np.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Child-Sum TreeLSTM cell: extends BasicLSTMCell so that a single step\n",
    "# can consume a variable number of child (c, h) states.\n",
    "class ChildSumTreeLSTM(tf.contrib.rnn.BasicLSTMCell):\n",
    "    \n",
    "    def __init__(self,mem_dim,keep_prob = 1.0):\n",
    "        \"\"\"mem_dim: number of hidden units; keep_prob: dropout keep\n",
    "        probability (float or placeholder) applied to the candidate vector.\"\"\"\n",
    "        super(ChildSumTreeLSTM,self).__init__(mem_dim)\n",
    "        self.mem_dim = mem_dim\n",
    "        self._keep_prob = keep_prob\n",
    "    \n",
    "    def __call__(self,inputs,state,scope=None):\n",
    "        \"\"\"One TreeLSTM step.\n",
    "\n",
    "        inputs: input vector for this node.\n",
    "        state:  sequence of child (c, h) state tuples (length >= 1).\n",
    "        Returns (new_h, LSTMStateTuple(new_c, new_h)).\n",
    "        \"\"\"\n",
    "        with tf.variable_scope(scope or type(self).__name__):\n",
    "            child_state = state\n",
    "            # Build tf.concat([inputs, h0, h1, ..., hn], 1) step by step.\n",
    "            com = [hi for ci,hi in child_state]\n",
    "            com.insert(0,inputs)\n",
    "            \n",
    "            # One linear map yields i, j (candidate), one forget gate per\n",
    "            # child, and o -- hence (num_children + 3) slices.\n",
    "            concat = tf.contrib.layers.linear(tf.concat(com,1)\n",
    "                                              ,(len(child_state)+3)*self._num_units)\n",
    "            splits = tf.split(value=concat,num_or_size_splits=(len(child_state)+3),axis=1)\n",
    "            i = splits[0]\n",
    "            j = splits[1]\n",
    "            o = splits[-1]\n",
    "            f = splits[2:-1]\n",
    "            # j is the candidate vector; self._activation is tanh here.\n",
    "            j = self._activation(j)\n",
    "            if not isinstance(self._keep_prob, float) or self._keep_prob < 1:\n",
    "                j = tf.nn.dropout(j,self._keep_prob)\n",
    "            # Forget-gated sum of child cell states (index 0 of a child\n",
    "            # state tuple is its cell vector c).\n",
    "            # BUG FIX: the original loop ran over range(len(f)) after seeding\n",
    "            # `one` with the k=0 term, so the first child's contribution was\n",
    "            # added twice; the loop now starts at k=1.\n",
    "            one = child_state[0][0]*tf.sigmoid(f[0]+self._forget_bias)\n",
    "            for k in range(1, len(f)):\n",
    "                one += child_state[k][0]*tf.sigmoid(f[k]+self._forget_bias)\n",
    "            \n",
    "            new_c = one+tf.sigmoid(i)*j\n",
    "            \n",
    "            new_h = self._activation(new_c)*tf.sigmoid(o)\n",
    "            \n",
    "            new_state = tf.contrib.rnn.LSTMStateTuple(new_c,new_h)\n",
    "            \n",
    "        return new_h, new_state"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_embedding(weight_matrix):\n",
    "    \"\"\"Wrap a pre-initialised (vocab_size, dim) matrix in a trainable td.Embedding layer.\"\"\"\n",
    "    vocab_size, embed_dim = weight_matrix.shape\n",
    "    return td.Embedding(vocab_size, embed_dim, initializer=weight_matrix, name='word_embedding')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_model(word_embedding,NUM_CLASS,lstm_num_units=300,keep_prob=1.0,vocab=vocab):\n",
    "    \"\"\"Build the recursive TreeLSTM classifier.\n",
    "\n",
    "    word_embedding: td.Embedding layer (see create_embedding).\n",
    "    NUM_CLASS:      number of output classes for the final FC layer.\n",
    "    lstm_num_units: hidden size of every TreeLSTM cell.\n",
    "    keep_prob:      dropout keep probability (float or placeholder).\n",
    "    vocab:          Vocab mapping words to embedding row indices.\n",
    "    Returns (compiler, metrics): the td.Compiler for the model and a dict\n",
    "    of batch-averaged metric tensors ('root_loss', 'root_hits').\n",
    "    \"\"\"\n",
    "    \n",
    "    # One TreeLSTM layer per supported child count (1..6), each wrapped in\n",
    "    # input/output dropout; leaves are also routed through tree_lstm2 below.\n",
    "    tree_lstm1 = td.ScopedLayer(\n",
    "            tf.contrib.rnn.DropoutWrapper(\n",
    "                ChildSumTreeLSTM(lstm_num_units, keep_prob=keep_prob),\n",
    "                input_keep_prob=keep_prob,output_keep_prob=keep_prob),\n",
    "            name_or_scope='tree_lstm1')\n",
    "\n",
    "    tree_lstm2 = td.ScopedLayer(\n",
    "            tf.contrib.rnn.DropoutWrapper(\n",
    "                ChildSumTreeLSTM(lstm_num_units, keep_prob=keep_prob),\n",
    "                input_keep_prob=keep_prob,output_keep_prob=keep_prob),\n",
    "            name_or_scope='tree_lstm2')\n",
    "\n",
    "    tree_lstm3 = td.ScopedLayer(\n",
    "            tf.contrib.rnn.DropoutWrapper(\n",
    "                ChildSumTreeLSTM(lstm_num_units, keep_prob=keep_prob),\n",
    "                input_keep_prob=keep_prob,output_keep_prob=keep_prob),\n",
    "            name_or_scope='tree_lstm3')\n",
    "\n",
    "    tree_lstm4 = td.ScopedLayer(\n",
    "            tf.contrib.rnn.DropoutWrapper(\n",
    "                ChildSumTreeLSTM(lstm_num_units, keep_prob=keep_prob),\n",
    "                input_keep_prob=keep_prob,output_keep_prob=keep_prob),\n",
    "            name_or_scope='tree_lstm4')\n",
    "\n",
    "    tree_lstm5 = td.ScopedLayer(\n",
    "            tf.contrib.rnn.DropoutWrapper(\n",
    "                ChildSumTreeLSTM(lstm_num_units, keep_prob=keep_prob),\n",
    "                input_keep_prob=keep_prob,output_keep_prob=keep_prob),\n",
    "            name_or_scope='tree_lstm5')\n",
    "    tree_lstm6 = td.ScopedLayer(\n",
    "            tf.contrib.rnn.DropoutWrapper(\n",
    "                ChildSumTreeLSTM(lstm_num_units, keep_prob=keep_prob),\n",
    "                input_keep_prob=keep_prob,output_keep_prob=keep_prob),\n",
    "            name_or_scope='tree_lstm6')\n",
    "\n",
    "    # Output layer; NUM_CLASS sets the number of classes.\n",
    "    output_layer = td.FC(NUM_CLASS, activation=None, name='output_layer')   \n",
    "    \n",
    "    # Forward declaration so the tree block can recurse into its children.\n",
    "    sub_cut = td.ForwardDeclaration(name=\"sub_cut\")\n",
    "    \n",
    "    def lookup_word(word):\n",
    "        # Map a word to its embedding row index via the vocabulary.\n",
    "        return vocab.index(word)\n",
    "    \n",
    "    # Builds the block computing (logit, state) for one node.\n",
    "    # Input is [word, [child_1, child_2, ...]] for internal nodes,\n",
    "    # or [word] for leaves.\n",
    "    def logit_and_state():\n",
    "\n",
    "        # Dispatch key: 1 for a leaf, 1 + number of children otherwise.\n",
    "        def get_length(root):\n",
    "            length = len(root)\n",
    "            if length > 1:\n",
    "                length = 1 + len(root[1])\n",
    "            return length\n",
    "\n",
    "        word2vec = td.GetItem(0) >> td.InputTransform(lookup_word) >> td.Scalar('int32') >> word_embedding\n",
    "        zeros_state = td.Zeros((tree_lstm1.state_size,)*2,name='zeros_state')        \n",
    "        word_case = td.AllOf(word2vec,zeros_state,name='word_case') >> tree_lstm2\n",
    "\n",
    "        # Each sub_cut() instantiates a fresh recursive block for one child.\n",
    "        pair2vec1 = (sub_cut(),) \n",
    "        pair2vec2 = (sub_cut(),sub_cut())\n",
    "        pair2vec3 = (sub_cut(),sub_cut(),sub_cut())\n",
    "        pair2vec4 = (sub_cut(),sub_cut(),sub_cut(),sub_cut())\n",
    "        pair2vec5 = (sub_cut(),sub_cut(),sub_cut(),sub_cut(),sub_cut())\n",
    "        pair2vec6 = (sub_cut(),sub_cut(),sub_cut(),sub_cut(),sub_cut(),sub_cut())\n",
    "        # NOTE(review): dead commented-out support for 7..15 children was\n",
    "        # removed; nodes with more than 6 children remain unsupported.\n",
    "\n",
    "        # Embed the node's own word and recurse into its children.\n",
    "        pair_case1 = td.AllOf(td.GetItem(0) >> td.InputTransform(lookup_word) >> td.Scalar('int32') >> word_embedding\n",
    "                              ,td.GetItem(1)>>pair2vec1,name='pair_case1') >> tree_lstm1\n",
    "        pair_case2 = td.AllOf(td.GetItem(0) >> td.InputTransform(lookup_word) >> td.Scalar('int32') >> word_embedding\n",
    "                              ,td.GetItem(1)>>pair2vec2,name='pair_case2') >> tree_lstm2  \n",
    "        pair_case3 = td.AllOf(td.GetItem(0) >> td.InputTransform(lookup_word) >> td.Scalar('int32') >> word_embedding\n",
    "                              ,td.GetItem(1)>>pair2vec3,name='pair_case3') >> tree_lstm3\n",
    "        pair_case4 = td.AllOf(td.GetItem(0) >> td.InputTransform(lookup_word) >> td.Scalar('int32') >> word_embedding\n",
    "                              ,td.GetItem(1)>>pair2vec4,name='pair_case4') >> tree_lstm4\n",
    "        pair_case5 = td.AllOf(td.GetItem(0) >> td.InputTransform(lookup_word) >> td.Scalar('int32') >> word_embedding\n",
    "                              ,td.GetItem(1)>>pair2vec5,name='pair_case5') >> tree_lstm5\n",
    "        pair_case6 = td.AllOf(td.GetItem(0) >> td.InputTransform(lookup_word) >> td.Scalar('int32') >> word_embedding\n",
    "                              ,td.GetItem(1)>>pair2vec6,name='pair_case6') >> tree_lstm6\n",
    "\n",
    "        # Route each node to the case matching its child count.\n",
    "        ans = td.OneOf(get_length,[(1,word_case),(2,pair_case1),(3,pair_case2),\n",
    "                                   (4,pair_case3),(5,pair_case4),(6,pair_case5),\n",
    "                                   (7,pair_case6)],name='ans')\n",
    "\n",
    "        # Final output of the block is (logits, state).\n",
    "        last = ans >> (output_layer,td.Identity())\n",
    "        return last\n",
    "\n",
    "    # Root model plus the recursive resolution for sub_cut().\n",
    "    model = emb_tree(logit_and_state(),is_root=True)\n",
    "    sub_cut.resolve_to(emb_tree(logit_and_state(),is_root=False))\n",
    "\n",
    "    print(\"Create completed!\")\n",
    "    \n",
    "    compiler = td.Compiler.create(model)\n",
    "    \n",
    "    # Average each recorded metric over the batch.\n",
    "    # (A stray no-op bare `metrics` expression statement was removed here.)\n",
    "    metrics = {k: tf.reduce_mean(v) for k,v in compiler.metric_tensors.items()}\n",
    "    \n",
    "    return compiler,metrics\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Recursive block compiled for both the root model and each sub_cut()\n",
    "# declaration; is_root selects the labelled (root) variant.\n",
    "def emb_tree(logit_and_state,is_root):\n",
    "    \"\"\"Root nodes: split off the gold label and record loss/hit metrics.\n",
    "    Non-root nodes: compute and forward only the LSTM state.\"\"\"\n",
    "    # Idiom fix: test truthiness directly instead of comparing '== True'.\n",
    "    if is_root:\n",
    "        return td.InputTransform(cut_tree_and_get_label) >> (td.Scalar('int32'),logit_and_state) >> addmetric()\n",
    "    else:\n",
    "        return td.InputTransform(cut_tree) >> logit_and_state >> addmetric2()\n",
    "# 对非根节点的节点裁剪，输出格式为[[tree.vector],[child_1,child_2...]] ---- 父节点\n",
    "# 如果该节点是叶子节点，输出格式为[[tree.vector]] ---- 叶子节点\n",
    "def cut_tree(tree):\n",
    "    current = [tree.word]\n",
    "    if (tree.num_children) > 0:\n",
    "        current.append(tree.children)\n",
    "    #print \"current: \",current\n",
    "    return current\n",
    "\n",
    "# 对有label的数据进行裁剪，输出格式为(label,[tree.vector,[child_1,child_2...]])\n",
    "def cut_tree_and_get_label(label_and_tree):\n",
    "    #label = label_and_tree[0]\n",
    "    tree = label_and_tree\n",
    "    label = tree.gold_label\n",
    "    current = [tree.word]\n",
    "    if(tree.num_children) > 0:\n",
    "        current.append(tree.children)\n",
    "    #print 'The label: %d, The current: %s' % (label,current)\n",
    "    return label,current\n",
    "\n",
    "# Root-node wrapper: computes loss and hit-rate metrics from a\n",
    "# (label, (logit, state)) input and emits (loss, hits, logits, state).\n",
    "def addmetric():\n",
    "    c = td.Composition()\n",
    "    with c.scope():\n",
    "        # Input is a (label, (logit, state)) pair.\n",
    "        labels = c.input[0]\n",
    "        logits = td.GetItem(0).reads(c.input[1])\n",
    "        state = td.GetItem(1).reads(c.input[1])\n",
    "        \n",
    "        # Cross-entropy at the root, recorded as the 'root_loss' metric.\n",
    "        loss = td.Function(tf_node_loss)\n",
    "        td.Metric('root_loss').reads(loss.reads(logits,labels))\n",
    "        \n",
    "        # Fine-grained hit (1 if argmax(logits) == label), metric 'root_hits'.\n",
    "        hits = td.Function(tf_fine_grained_hits)\n",
    "        td.Metric('root_hits').reads(hits.reads(logits,labels))\n",
    "        \n",
    "        # Emit the loss, hits, classification logits and LSTM state.\n",
    "        c.output.reads(loss,hits,logits,state)\n",
    "    return c       \n",
    "\n",
    "# Non-root wrapper: drop the logits and forward only the child state.\n",
    "def addmetric2():\n",
    "    \"\"\"Pass through the LSTM state from a (logit, state) pair.\"\"\"\n",
    "    comp = td.Composition()\n",
    "    with comp.scope():\n",
    "        child_state = td.GetItem(1).reads(comp.input)\n",
    "        comp.output.reads(child_state)\n",
    "    return comp\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cross-entropy between predicted logits and integer class labels.\n",
    "def tf_node_loss(logits, labels):\n",
    "    # labels are class indices, so the sparse variant is used (no one-hot).\n",
    "    return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=labels)\n",
    "\n",
    "# Per-example hit indicator: 1.0 where the arg-max class equals the label.\n",
    "def tf_fine_grained_hits(logits, labels):\n",
    "    # Classes are 0-based; take the arg-max over the class axis and compare.\n",
    "    prediction = tf.cast(tf.argmax(logits,1),tf.int32)\n",
    "    # NOTE(review): returns float64 while the model otherwise uses float32 -- confirm intentional.\n",
    "    return tf.cast(tf.equal(prediction, labels),tf.float64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameters and a fresh graph for this run.\n",
    "tf.reset_default_graph()\n",
    "learning_rate = 0.05\n",
    "keep_prob_now = 0.75  # dropout keep probability used during training\n",
    "batch_size = 256\n",
    "epoch = 70            # maximum number of training epochs\n",
    "num_gpus = 1\n",
    "embedding_learning_rate_factor = 0.1  # embedding gradients are scaled down by this\n",
    "# Defaults to 1.0 (no dropout) for validation/test; fed keep_prob_now when training.\n",
    "keep_prob_ph = tf.placeholder_with_default(1.0,[])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "\n",
    "# Run one optimisation step on a single pre-compiled batch.\n",
    "# Relies on notebook globals: train_feed_dict, compiler, sess, train, loss.\n",
    "def train_step(epo,batch_count,num,batch,losses):\n",
    "    train_feed_dict[compiler.loom_input_tensor] = batch\n",
    "    _,batch_loss = sess.run([train,loss],train_feed_dict)\n",
    "    losses.append(batch_loss)\n",
    "    # Running average over every batch seen so far this epoch.\n",
    "    avg_loss = sum(losses) / len(losses)\n",
    "    sys.stdout.write('\\rTrain: Epoch %d-%d-%d, loss: %.4f' %(epo+1, batch_count+1, num, avg_loss))\n",
    "\n",
    "    return batch_loss\n",
    "\n",
    "# Sum of per-batch losses for one pass over train_set (grouped into\n",
    "# batches of the global batch_size).\n",
    "def train_epoch(epo,batch_count,train_set,losses):\n",
    "    return sum(train_step(epo,batch_count,num,batch,losses) for num,batch in enumerate(td.group_by_batches(train_set,batch_size),1))\n",
    "\n",
    "def build_valid_data(dataset):\n",
    "    labels = []\n",
    "    for i in range(len(dataset)):\n",
    "        labels.append(dataset[i].gold_label)\n",
    "    return dataset, labels\n",
    "\n",
    "def valid_metric(predictions, labels):\n",
    "    correct = np.equal(predictions, labels)\n",
    "    return correct\n",
    "\n",
    "def get_valid_batch(valid_data, valid_labels, batchsize):\n",
    "    n_batch = int(len(valid_data)/batchsize)\n",
    "    if len(valid_data)% batchsize != 0:\n",
    "        n_batch += 1\n",
    "    for i in range(n_batch):\n",
    "        yield valid_data[i*batchsize:(i+1)*batchsize], valid_labels[i*batchsize:(i+1)*batchsize]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Create the model\n",
      "Create completed!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/x/anaconda3/envs/tf1py2/lib/python2.7/site-packages/tensorflow/python/ops/gradients_impl.py:91: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.\n",
      "  \"Converting sparse IndexedSlices to a dense Tensor of unknown shape. \"\n"
     ]
    }
   ],
   "source": [
    "best_accuracy = 0.0\n",
    "\n",
    "print('Create the model')\n",
    "    \n",
    "word_embedding = create_embedding(weight_matrix=weight_matrix)\n",
    "# NUM_CLASS=26021 output classes -- TODO confirm against the label vocabulary.\n",
    "compiler, metrics = create_model(word_embedding=word_embedding,NUM_CLASS=26021,lstm_num_units=300,\n",
    "                                 keep_prob=keep_prob_ph,vocab=vocab)\n",
    "loss = tf.reduce_mean(compiler.metric_tensors['root_loss'])\n",
    "opt = tf.train.AdagradOptimizer(learning_rate=learning_rate)\n",
    "grads_and_vars = opt.compute_gradients(loss)\n",
    "\n",
    "# Clip every gradient to [-1, 1]; variables without gradients are dropped.\n",
    "capped_gradients = [(tf.clip_by_value(grad,-1.,1.),var) for grad,var in grads_and_vars if grad is not None]\n",
    "\n",
    "# Scale down the embedding-layer gradient so the embeddings train more slowly.\n",
    "found = 0\n",
    "for i,(grad,var) in enumerate(capped_gradients):\n",
    "    if var == word_embedding.weights:\n",
    "        found+=1\n",
    "        grad = tf.scalar_mul(embedding_learning_rate_factor,grad)\n",
    "        capped_gradients[i] = (grad,var)\n",
    "assert found == 1 \n",
    "\n",
    "train = opt.apply_gradients(capped_gradients)\n",
    "saver = tf.train.Saver()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "reading the lastest version...\n",
      "training the model\n",
      "Training epoch    1\n",
      "Train: Epoch 1-34-391, loss: 3.0139"
     ]
    }
   ],
   "source": [
    "# Resume from a checkpoint and run the train / validate / test loop with\n",
    "# early stopping on validation accuracy.\n",
    "best_accuracy = 0.4047\n",
    "best_val_acc_epoch = 0\n",
    "global_epo = 0\n",
    "run_id = \"_\".join([time.strftime(\"%Y-%m-%d-%H-%M-%S\"), str(os.getpid())])\n",
    "save_path = \"/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-%s\" % run_id\n",
    "# Checkpoint of the previous run to restore before continuing training.\n",
    "checkpoint = '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524'\n",
    "\n",
    "with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True)) as sess:\n",
    "    print('reading the lastest version...')\n",
    "    saver.restore(sess,checkpoint)\n",
    "    \n",
    "    train_feed_dict = {keep_prob_ph:keep_prob_now}\n",
    "\n",
    "    with compiler.multiprocessing_pool():\n",
    "        print('training the model')\n",
    "        # Epoch loop (early stopping below may break out early).\n",
    "        for epo in range(epoch):\n",
    "            # ---- training: the training set is read from the DB in\n",
    "            # 100k-row shards (79 shards) to bound memory use ----\n",
    "           \n",
    "            losses = []\n",
    "            print(\"Training epoch %4d\" % (epo + 1))\n",
    "            start = time.time()\n",
    "            train_count = 0\n",
    "            for num in range(79):\n",
    "                # Offset and length of the shard to read.\n",
    "                dataset_temp = select(\"trainingdataset\", num*100000,100000)\n",
    "                train_count += len(dataset_temp)\n",
    "                np.random.shuffle(dataset_temp)\n",
    "                train_data_epo_for_shuffled = compiler.build_loom_inputs(dataset_temp)\n",
    "                for shuffled in td.epochs(train_data_epo_for_shuffled,1): \n",
    "                    train_loss = train_epoch(epo,num,shuffled,losses)\n",
    "                # Free the shard before loading the next one.\n",
    "                del dataset_temp\n",
    "                del train_data_epo_for_shuffled\n",
    "                gc.collect()\n",
    "            end = time.time()\n",
    "    \n",
    "            m,s = divmod(end-start,60)\n",
    "            h,m = divmod(m,60)\n",
    "            time_str = \"%02d:%02d:%02d\" % (h,m,s)\n",
    "            print('  Training: time: %s, count: %4d' %(time_str,train_count))\n",
    "            \n",
    "            print(\"valid epoch %4d\" % (epo + 1))\n",
    "            valid_positive = 0\n",
    "            counts = 0\n",
    "            valid_count = 0\n",
    "            valid_counts = 0\n",
    "            start = time.time()\n",
    "            for num in range(6):\n",
    "                # Offset and length of the shard to read.\n",
    "                dataset_temp_va = select(\"validdataset\", num*100000,100000)   \n",
    "                valid_count += len(dataset_temp_va)\n",
    "                valid_data, valid_labels = build_valid_data(dataset_temp_va) \n",
    "                for nn, (val_loop_input, val_label) in enumerate(get_valid_batch(valid_data, valid_labels, batch_size)):\n",
    "                    cur_valid_data = compiler.build_feed_dict(val_loop_input)\n",
    "                    # keep_prob_ph defaults to 1.0 here, so dropout is off.\n",
    "                    cur_metrics = sess.run(metrics, cur_valid_data)\n",
    "                    valid_loss, valid_hit_rate = cur_metrics[\"root_loss\"], cur_metrics[\"root_hits\"]\n",
    "                    valid_positive += valid_hit_rate\n",
    "                    valid_counts += 1\n",
    "                    counts += len(val_label)\n",
    "                    sys.stdout.write('\\rValid: Epoch %d-%d-%d, hit_rate: %.4f, loss: %.4f' %(epo+1, num+1,nn+1,valid_hit_rate, valid_loss))\n",
    "\n",
    "                del dataset_temp_va\n",
    "                del valid_data\n",
    "                del valid_labels\n",
    "                gc.collect()\n",
    "            end = time.time()\n",
    "            # NOTE(review): accuracy is the mean of per-batch hit rates, so a\n",
    "            # short final batch is weighted the same as a full one.\n",
    "            ans = float(valid_positive) / valid_counts\n",
    "            m,s = divmod(end-start,60)\n",
    "            h,m = divmod(m,60)\n",
    "            time_str = \"%02d:%02d:%02d\" % (h,m,s)\n",
    "           \n",
    "            print('\\nValid: accuracy: %.4f, time: %s, counts: %4d' %(ans, time_str, counts))\n",
    "            \n",
    "            print(\"Test epoch %4d\" % (epo + 1))\n",
    "            test_positive = 0\n",
    "            test_counts = 0\n",
    "            test_count = 0\n",
    "            counts = 0\n",
    "            start = time.time()\n",
    "            for num in range(1):\n",
    "                # Offset and length of the shard to read.\n",
    "                dataset_temp_te = select(\"testdataset\", num*100000,100000)   \n",
    "                test_count += len(dataset_temp_te)\n",
    "                test_data, test_labels = build_valid_data(dataset_temp_te)  \n",
    "                for nn, (test_loop_input, test_label) in enumerate(get_valid_batch(test_data, test_labels, batch_size)):\n",
    "                    cur_test_data = compiler.build_feed_dict(test_loop_input)\n",
    "                    cur_metrics = sess.run(metrics, cur_test_data)\n",
    "                    test_loss, test_hit_rate = cur_metrics[\"root_loss\"], cur_metrics[\"root_hits\"]\n",
    "                    test_positive += test_hit_rate\n",
    "                    test_counts += 1\n",
    "                    counts += len(test_label)\n",
    "                    sys.stdout.write('\\rTest: Epoch %d-%d-%d, hit_rate: %.4f, loss: %.4f' %(epo+1, num+1,nn+1,test_hit_rate, test_loss))\n",
    "                del dataset_temp_te\n",
    "                del test_data\n",
    "                del test_labels\n",
    "                gc.collect()\n",
    "            end = time.time()\n",
    "            test_ans = float(test_positive) / test_counts\n",
    "            m,s = divmod(end-start,60)\n",
    "            h,m = divmod(m,60)\n",
    "            time_str = \"%02d:%02d:%02d\" % (h,m,s)\n",
    "            print('\\nTest: accuracy: %.4f, time: %s,count: %4d' %(test_ans, time_str, counts))\n",
    "            \n",
    "            # Save when validation accuracy improves; stop after 5 epochs\n",
    "            # without improvement.\n",
    "            if ans > best_accuracy:\n",
    "                    checkpoint_path = saver.save(sess, save_path)\n",
    "                    print(\"  (Best epoch so far, cum. val. acc increased to %.5f from %.5f. Saving to '%s')\" % (ans, best_accuracy, checkpoint_path))\n",
    "                    best_accuracy = ans\n",
    "                    best_val_acc_epoch = epo\n",
    "            elif epo - best_val_acc_epoch >= 5:\n",
    "                    print(\"Stopping training after %i epochs without improvement on validation accuracy.\" % 5)\n",
    "                    break\n",
    "\n",
    "            \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "initializing tensorflow\n",
      "training the model\n",
      "Training epoch    1\n",
      "Train: Epoch 1-79-78, loss: 5.6749  Training: time: 10:04:57, count: 7782763\n",
      "valid epoch    1\n",
      "Valid: Epoch 1-6-370, hit_rate: 0.0637, loss: 8.04483\n",
      "Valid: accuracy: 0.1542, time: 00:25:21, counts: 593621\n",
      "Test epoch    1\n",
      "Test: Epoch 1-1-39, hit_rate: 0.1222, loss: 7.0835\n",
      "Test: accuracy: 0.1909, time: 00:00:23,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.15424 from 0.00000. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch    2\n",
      "Train: Epoch 2-79-78, loss: 4.9121  Training: time: 10:13:17, count: 7782763\n",
      "valid epoch    2\n",
      "Valid: Epoch 2-6-370, hit_rate: 0.1720, loss: 7.45542\n",
      "Valid: accuracy: 0.2039, time: 00:25:08, counts: 593621\n",
      "Test epoch    2\n",
      "Test: Epoch 2-1-39, hit_rate: 0.0556, loss: 6.5914\n",
      "Test: accuracy: 0.2291, time: 00:00:20,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.20389 from 0.15424. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch    3\n",
      "Train: Epoch 3-79-78, loss: 4.5488  Training: time: 10:09:11, count: 7782763\n",
      "valid epoch    3\n",
      "Valid: Epoch 3-6-370, hit_rate: 0.2102, loss: 6.84735\n",
      "Valid: accuracy: 0.2314, time: 00:25:02, counts: 593621\n",
      "Test epoch    3\n",
      "Test: Epoch 3-1-39, hit_rate: 0.1556, loss: 6.2761\n",
      "Test: accuracy: 0.2528, time: 00:00:21,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.23136 from 0.20389. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch    4\n",
      "Train: Epoch 4-79-78, loss: 4.2883  Training: time: 10:10:12, count: 7782763\n",
      "valid epoch    4\n",
      "Valid: Epoch 4-6-370, hit_rate: 0.2102, loss: 6.55065\n",
      "Valid: accuracy: 0.2541, time: 00:25:10, counts: 593621\n",
      "Test epoch    4\n",
      "Test: Epoch 4-1-39, hit_rate: 0.1778, loss: 6.1747\n",
      "Test: accuracy: 0.2672, time: 00:00:21,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.25413 from 0.23136. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch    5\n",
      "Train: Epoch 5-79-78, loss: 4.1003  Training: time: 10:10:07, count: 7782763\n",
      "valid epoch    5\n",
      "Valid: Epoch 5-6-370, hit_rate: 0.2038, loss: 6.24028\n",
      "Valid: accuracy: 0.2743, time: 00:25:14, counts: 593621\n",
      "Test epoch    5\n",
      "Test: Epoch 5-1-39, hit_rate: 0.1778, loss: 5.9671\n",
      "Test: accuracy: 0.2872, time: 00:00:21,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.27426 from 0.25413. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch    6\n",
      "Train: Epoch 6-79-78, loss: 3.9430  Training: time: 10:01:47, count: 7782763\n",
      "valid epoch    6\n",
      "Valid: Epoch 6-6-370, hit_rate: 0.2166, loss: 5.97136\n",
      "Valid: accuracy: 0.2920, time: 00:24:40, counts: 593621\n",
      "Test epoch    6\n",
      "Test: Epoch 6-1-39, hit_rate: 0.2000, loss: 5.7792\n",
      "Test: accuracy: 0.2999, time: 00:00:20,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.29203 from 0.27426. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch    7\n",
      "Train: Epoch 7-79-78, loss: 3.8109  Training: time: 10:02:43, count: 7782763\n",
      "valid epoch    7\n",
      "Valid: Epoch 7-6-370, hit_rate: 0.2484, loss: 5.77734\n",
      "Valid: accuracy: 0.3055, time: 00:25:14, counts: 593621\n",
      "Test epoch    7\n",
      "Test: Epoch 7-1-39, hit_rate: 0.2000, loss: 5.6930\n",
      "Test: accuracy: 0.3066, time: 00:00:20,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.30553 from 0.29203. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch    8\n",
      "Train: Epoch 8-79-78, loss: 3.7049  Training: time: 10:18:53, count: 7782763\n",
      "valid epoch    8\n",
      "Valid: Epoch 8-6-370, hit_rate: 0.2548, loss: 5.60811\n",
      "Valid: accuracy: 0.3182, time: 00:25:05, counts: 593621\n",
      "Test epoch    8\n",
      "Test: Epoch 8-1-39, hit_rate: 0.2000, loss: 5.6334\n",
      "Test: accuracy: 0.3130, time: 00:00:21,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.31817 from 0.30553. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch    9\n",
      "Train: Epoch 9-79-78, loss: 3.6136  Training: time: 10:11:51, count: 7782763\n",
      "valid epoch    9\n",
      "Valid: Epoch 9-6-370, hit_rate: 0.2803, loss: 5.46566\n",
      "Valid: accuracy: 0.3277, time: 00:25:30, counts: 593621\n",
      "Test epoch    9\n",
      "Test: Epoch 9-1-39, hit_rate: 0.2000, loss: 5.5258\n",
      "Test: accuracy: 0.3163, time: 00:00:20,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.32771 from 0.31817. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   10\n",
      "Train: Epoch 10-79-78, loss: 3.5331  Training: time: 10:34:09, count: 7782763\n",
      "valid epoch   10\n",
      "Valid: Epoch 10-6-370, hit_rate: 0.2611, loss: 5.34845\n",
      "Valid: accuracy: 0.3359, time: 00:25:36, counts: 593621\n",
      "Test epoch   10\n",
      "Test: Epoch 10-1-39, hit_rate: 0.2333, loss: 5.4660\n",
      "Test: accuracy: 0.3181, time: 00:00:23,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.33586 from 0.32771. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   11\n",
      "Train: Epoch 11-79-78, loss: 3.4637  Training: time: 10:20:52, count: 7782763\n",
      "valid epoch   11\n",
      "Valid: Epoch 11-6-370, hit_rate: 0.2611, loss: 5.21158\n",
      "Valid: accuracy: 0.3445, time: 00:25:21, counts: 593621\n",
      "Test epoch   11\n",
      "Test: Epoch 11-1-39, hit_rate: 0.2333, loss: 5.3138\n",
      "Test: accuracy: 0.3245, time: 00:00:23,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.34448 from 0.33586. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   12\n",
      "Train: Epoch 12-79-78, loss: 3.4008  Training: time: 10:15:37, count: 7782763\n",
      "valid epoch   12\n",
      "Valid: Epoch 12-6-370, hit_rate: 0.2739, loss: 5.07641\n",
      "Valid: accuracy: 0.3526, time: 00:25:18, counts: 593621\n",
      "Test epoch   12\n",
      "Test: Epoch 12-1-39, hit_rate: 0.2333, loss: 5.2282\n",
      "Test: accuracy: 0.3264, time: 00:00:21,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.35257 from 0.34448. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   13\n",
      "Train: Epoch 13-79-78, loss: 3.3447  Training: time: 10:15:39, count: 7782763\n",
      "valid epoch   13\n",
      "Valid: Epoch 13-6-370, hit_rate: 0.2611, loss: 4.96438\n",
      "Valid: accuracy: 0.3575, time: 00:25:05, counts: 593621\n",
      "Test epoch   13\n",
      "Test: Epoch 13-1-39, hit_rate: 0.2333, loss: 5.1424\n",
      "Test: accuracy: 0.3306, time: 00:00:22,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.35748 from 0.35257. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   14\n",
      "Train: Epoch 14-79-78, loss: 3.2967  Training: time: 10:17:46, count: 7782763\n",
      "valid epoch   14\n",
      "Valid: Epoch 14-6-370, hit_rate: 0.2739, loss: 4.84814\n",
      "Valid: accuracy: 0.3615, time: 00:25:44, counts: 593621\n",
      "Test epoch   14\n",
      "Test: Epoch 14-1-39, hit_rate: 0.2333, loss: 5.1014\n",
      "Test: accuracy: 0.3363, time: 00:00:22,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.36146 from 0.35748. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   15\n",
      "Train: Epoch 15-79-78, loss: 3.2488  Training: time: 10:34:05, count: 7782763\n",
      "valid epoch   15\n",
      "Valid: Epoch 15-6-370, hit_rate: 0.2866, loss: 4.76356\n",
      "Valid: accuracy: 0.3675, time: 00:25:42, counts: 593621\n",
      "Test epoch   15\n",
      "Test: Epoch 15-1-39, hit_rate: 0.2333, loss: 5.0161\n",
      "Test: accuracy: 0.3381, time: 00:00:22,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.36748 from 0.36146. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   16\n",
      "Train: Epoch 16-79-78, loss: 3.2087  Training: time: 10:28:04, count: 7782763\n",
      "valid epoch   16\n",
      "Valid: Epoch 16-6-370, hit_rate: 0.3121, loss: 4.69238\n",
      "Valid: accuracy: 0.3717, time: 00:25:01, counts: 593621\n",
      "Test epoch   16\n",
      "Test: Epoch 16-1-39, hit_rate: 0.2333, loss: 4.9580\n",
      "Test: accuracy: 0.3389, time: 00:00:22,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.37170 from 0.36748. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   17\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train: Epoch 17-79-78, loss: 3.1722  Training: time: 10:12:57, count: 7782763\n",
      "valid epoch   17\n",
      "Valid: Epoch 17-6-370, hit_rate: 0.3312, loss: 4.61109\n",
      "Valid: accuracy: 0.3752, time: 00:25:25, counts: 593621\n",
      "Test epoch   17\n",
      "Test: Epoch 17-1-39, hit_rate: 0.2333, loss: 4.9129\n",
      "Test: accuracy: 0.3425, time: 00:00:21,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.37516 from 0.37170. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   18\n",
      "Train: Epoch 18-79-78, loss: 3.1380  Training: time: 10:38:00, count: 7782763\n",
      "valid epoch   18\n",
      "Valid: Epoch 18-6-370, hit_rate: 0.3185, loss: 4.53434\n",
      "Valid: accuracy: 0.3789, time: 00:25:09, counts: 593621\n",
      "Test epoch   18\n",
      "Test: Epoch 18-1-39, hit_rate: 0.2333, loss: 4.8528\n",
      "Test: accuracy: 0.3453, time: 00:00:20,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.37889 from 0.37516. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   19\n",
      "Train: Epoch 19-79-78, loss: 3.1050  Training: time: 10:14:04, count: 7782763\n",
      "valid epoch   19\n",
      "Valid: Epoch 19-6-370, hit_rate: 0.3248, loss: 4.48504\n",
      "Valid: accuracy: 0.3813, time: 00:25:01, counts: 593621\n",
      "Test epoch   19\n",
      "Test: Epoch 19-1-39, hit_rate: 0.2333, loss: 4.7964\n",
      "Test: accuracy: 0.3451, time: 00:00:22,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.38131 from 0.37889. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   20\n",
      "Train: Epoch 20-79-78, loss: 3.0733  Training: time: 10:14:21, count: 7782763\n",
      "valid epoch   20\n",
      "Valid: Epoch 20-6-370, hit_rate: 0.3439, loss: 4.38513\n",
      "Valid: accuracy: 0.3852, time: 00:25:12, counts: 593621\n",
      "Test epoch   20\n",
      "Test: Epoch 20-1-39, hit_rate: 0.2333, loss: 4.7654\n",
      "Test: accuracy: 0.3481, time: 00:00:23,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.38519 from 0.38131. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   21\n",
      "Train: Epoch 21-79-78, loss: 3.0461  Training: time: 10:18:23, count: 7782763\n",
      "valid epoch   21\n",
      "Valid: Epoch 21-6-370, hit_rate: 0.3376, loss: 4.34918\n",
      "Valid: accuracy: 0.3882, time: 00:25:13, counts: 593621\n",
      "Test epoch   21\n",
      "Test: Epoch 21-1-39, hit_rate: 0.2333, loss: 4.7007\n",
      "Test: accuracy: 0.3495, time: 00:00:22,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.38821 from 0.38519. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   22\n",
      "Train: Epoch 22-79-78, loss: 3.0207  Training: time: 10:15:52, count: 7782763\n",
      "valid epoch   22\n",
      "Valid: Epoch 22-6-370, hit_rate: 0.3567, loss: 4.28862\n",
      "Valid: accuracy: 0.3905, time: 00:25:13, counts: 593621\n",
      "Test epoch   22\n",
      "Test: Epoch 22-1-39, hit_rate: 0.2333, loss: 4.6490\n",
      "Test: accuracy: 0.3507, time: 00:00:23,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.39049 from 0.38821. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   23\n",
      "Train: Epoch 23-79-78, loss: 2.9969  Training: time: 10:25:08, count: 7782763\n",
      "valid epoch   23\n",
      "Valid: Epoch 23-6-370, hit_rate: 0.3312, loss: 4.22028\n",
      "Valid: accuracy: 0.3861, time: 00:25:26, counts: 593621\n",
      "Test epoch   23\n",
      "Test: Epoch 23-1-39, hit_rate: 0.2444, loss: 4.6623\n",
      "Test: accuracy: 0.3469, time: 00:00:21,count: 9818\n",
      "Training epoch   24\n",
      "Train: Epoch 24-79-78, loss: 2.9747  Training: time: 10:27:15, count: 7782763\n",
      "valid epoch   24\n",
      "Valid: Epoch 24-6-370, hit_rate: 0.3631, loss: 4.17556\n",
      "Valid: accuracy: 0.3941, time: 00:25:24, counts: 593621\n",
      "Test epoch   24\n",
      "Test: Epoch 24-1-39, hit_rate: 0.2444, loss: 4.5695\n",
      "Test: accuracy: 0.3503, time: 00:00:22,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.39411 from 0.39049. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   25\n",
      "Train: Epoch 25-79-78, loss: 2.9539  Training: time: 10:15:21, count: 7782763\n",
      "valid epoch   25\n",
      "Valid: Epoch 25-6-370, hit_rate: 0.3631, loss: 4.11220\n",
      "Valid: accuracy: 0.3946, time: 00:25:12, counts: 593621\n",
      "Test epoch   25\n",
      "Test: Epoch 25-1-39, hit_rate: 0.2444, loss: 4.5329\n",
      "Test: accuracy: 0.3528, time: 00:00:21,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.39459 from 0.39411. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   26\n",
      "Train: Epoch 26-79-78, loss: 2.9325  Training: time: 10:16:48, count: 7782763\n",
      "valid epoch   26\n",
      "Valid: Epoch 26-6-370, hit_rate: 0.3694, loss: 4.05511\n",
      "Valid: accuracy: 0.3973, time: 00:25:09, counts: 593621\n",
      "Test epoch   26\n",
      "Test: Epoch 26-1-39, hit_rate: 0.2444, loss: 4.5260\n",
      "Test: accuracy: 0.3542, time: 00:00:21,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.39728 from 0.39459. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   27\n",
      "Train: Epoch 27-79-78, loss: 2.9163  Training: time: 10:24:10, count: 7782763\n",
      "valid epoch   27\n",
      "Valid: Epoch 27-6-370, hit_rate: 0.3694, loss: 4.05488\n",
      "Valid: accuracy: 0.3993, time: 00:25:05, counts: 593621\n",
      "Test epoch   27\n",
      "Test: Epoch 27-1-39, hit_rate: 0.2444, loss: 4.4931\n",
      "Test: accuracy: 0.3554, time: 00:00:21,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.39929 from 0.39728. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   28\n",
      "Train: Epoch 28-79-78, loss: 2.8966  Training: time: 10:17:38, count: 7782763\n",
      "valid epoch   28\n",
      "Valid: Epoch 28-6-370, hit_rate: 0.3822, loss: 3.98239\n",
      "Valid: accuracy: 0.4024, time: 00:25:12, counts: 593621\n",
      "Test epoch   28\n",
      "Test: Epoch 28-1-39, hit_rate: 0.2556, loss: 4.4627\n",
      "Test: accuracy: 0.3564, time: 00:00:22,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.40239 from 0.39929. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   29\n",
      "Train: Epoch 29-79-78, loss: 2.8765  Training: time: 10:15:51, count: 7782763\n",
      "valid epoch   29\n",
      "Valid: Epoch 29-6-370, hit_rate: 0.3758, loss: 3.92517\n",
      "Valid: accuracy: 0.4036, time: 00:25:23, counts: 593621\n",
      "Test epoch   29\n",
      "Test: Epoch 29-1-39, hit_rate: 0.2556, loss: 4.4054\n",
      "Test: accuracy: 0.3545, time: 00:00:23,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.40356 from 0.40239. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   30\n",
      "Train: Epoch 30-79-78, loss: 2.8603  Training: time: 10:20:28, count: 7782763\n",
      "valid epoch   30\n",
      "Valid: Epoch 30-6-370, hit_rate: 0.3822, loss: 3.91131\n",
      "Valid: accuracy: 0.4047, time: 00:25:36, counts: 593621\n",
      "Test epoch   30\n",
      "Test: Epoch 30-1-39, hit_rate: 0.2556, loss: 4.3923\n",
      "Test: accuracy: 0.3558, time: 00:00:22,count: 9818\n",
      "  (Best epoch so far, cum. val. acc increased to 0.40469 from 0.40356. Saving to '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-21-21-52_19524')\n",
      "Training epoch   31\n",
      "Train: Epoch 31-25-344, loss: 3.0053"
     ]
    }
   ],
   "source": [
    "# Main training loop: for each epoch, train over 79 database shards,\n",
    "# evaluate on the validation and test sets, checkpoint on a new best\n",
    "# validation accuracy, and early-stop after 5 epochs without improvement.\n",
    "# NOTE(review): relies on globals defined in earlier cells (compiler, saver,\n",
    "# metrics, train_epoch, build_valid_data, get_valid_batch, keep_prob_ph,\n",
    "# keep_prob_now, epoch, batch_size) -- confirm they exist before running.\n",
    "best_accuracy = 0.0\n",
    "best_val_acc_epoch = 0\n",
    "#save_path = os.path.join(data_dir,'treelstm_method_model')\n",
    "global_epo = 0\n",
    "# Unique run id (timestamp + pid) used to name this run's checkpoint file.\n",
    "run_id = \"_\".join([time.strftime(\"%Y-%m-%d-%H-%M-%S\"), str(os.getpid())])\n",
    "save_path = \"/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-%s\" % run_id\n",
    "with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True)) as sess:\n",
    "\n",
    "    print('initializing tensorflow')\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    \n",
    "    train_feed_dict = {keep_prob_ph:keep_prob_now}\n",
    "\n",
    "    with compiler.multiprocessing_pool():\n",
    "        print('training the model')\n",
    "        # set the number of epochs\n",
    "        for epo in range(epoch):\n",
    "            # shard-wise iteration over the database table\n",
    "            #training\n",
    "           \n",
    "            losses = []\n",
    "            print(\"Training epoch %4d\" % (epo + 1))\n",
    "            start = time.time()\n",
    "            train_count = 0\n",
    "            for num in range(79):\n",
    "                #print(num)\n",
    "                # set the read offset and length for this shard\n",
    "                dataset_temp = select(\"trainingdataset\", num*100000,100000)\n",
    "                train_count += len(dataset_temp)\n",
    "                np.random.shuffle(dataset_temp)\n",
    "                train_data_epo_for_shuffled = compiler.build_loom_inputs(dataset_temp)\n",
    "                for shuffled in td.epochs(train_data_epo_for_shuffled,1): \n",
    "                    train_loss = train_epoch(epo,num,shuffled,losses)\n",
    "                    #print(\"batch %d---%d, batch_loss: %.4f, time: %f\" % (epo+1, num, avg_loss,1))      \n",
    "                    #sys.stdout.write('\\rTrain: Epoch %d-%d-%d, loss: %.4f' %(epo+1, num, batch_count, avg_loss))\n",
    "                # free the shard before loading the next one (kernel memory persists)\n",
    "                del dataset_temp\n",
    "                del train_data_epo_for_shuffled\n",
    "                gc.collect()\n",
    "                #print(\"\\rTrain: epo:%4d--%d, time:%f\" % (epo+1,num+1,end-start))\n",
    "            end = time.time()\n",
    "    \n",
    "            m,s = divmod(end-start,60)\n",
    "            h,m = divmod(m,60)\n",
    "            time_str = \"%02d:%02d:%02d\" % (h,m,s)\n",
    "            print('  Training: time: %s, count: %4d' %(time_str,train_count))\n",
    "            \n",
    "            print(\"valid epoch %4d\" % (epo + 1))\n",
    "            valid_positive = 0\n",
    "            counts = 0\n",
    "            valid_count = 0\n",
    "            valid_counts = 0\n",
    "            start = time.time()\n",
    "            for num in range(6):\n",
    "                # set the read offset and length for this shard\n",
    "                dataset_temp_va = select(\"validdataset\", num*100000,100000)   \n",
    "                valid_count += len(dataset_temp_va)\n",
    "                valid_data, valid_labels = build_valid_data(dataset_temp_va) \n",
    "                for nn, (val_loop_input, val_label) in enumerate(get_valid_batch(valid_data, valid_labels, batch_size)):\n",
    "                    cur_valid_data = compiler.build_feed_dict(val_loop_input)\n",
    "                    #cur_valid_data[keep_prob_ph] = 1.0\n",
    "                    cur_metrics = sess.run(metrics, cur_valid_data)\n",
    "                    valid_loss, valid_hit_rate = cur_metrics[\"root_loss\"], cur_metrics[\"root_hits\"]\n",
    "                    valid_positive += valid_hit_rate\n",
    "                    valid_counts += 1\n",
    "                    counts += len(val_label)\n",
    "                    sys.stdout.write('\\rValid: Epoch %d-%d-%d, hit_rate: %.4f, loss: %.4f' %(epo+1, num+1,nn+1,valid_hit_rate, valid_loss))\n",
    "\n",
    "                del dataset_temp_va\n",
    "                del valid_data\n",
    "                del valid_labels\n",
    "                gc.collect()\n",
    "            end = time.time()\n",
    "            # NOTE(review): 'ans' is the unweighted mean of per-batch hit rates;\n",
    "            # batches of unequal size are weighted equally -- confirm intended.\n",
    "            ans = float(valid_positive) / valid_counts\n",
    "            m,s = divmod(end-start,60)\n",
    "            h,m = divmod(m,60)\n",
    "            time_str = \"%02d:%02d:%02d\" % (h,m,s)\n",
    "           \n",
    "            print('\\nValid: accuracy: %.4f, time: %s, counts: %4d' %(ans, time_str, counts))\n",
    "            \n",
    "            print(\"Test epoch %4d\" % (epo + 1))\n",
    "            test_positive = 0\n",
    "            test_counts = 0\n",
    "            test_count = 0\n",
    "            counts = 0\n",
    "            start = time.time()\n",
    "            for num in range(1):\n",
    "                # set the read offset and length for this shard\n",
    "                dataset_temp_te = select(\"testdataset\", num*100000,100000)   \n",
    "                test_count += len(dataset_temp_te)\n",
    "                test_data, test_labels = build_valid_data(dataset_temp_te)  \n",
    "                for nn, (test_loop_input, test_label) in enumerate(get_valid_batch(test_data, test_labels, batch_size)):\n",
    "                    cur_test_data = compiler.build_feed_dict(test_loop_input)\n",
    "                    #cur_test_data[keep_prob_ph] = 1.0\n",
    "                    cur_metrics = sess.run(metrics, cur_test_data)\n",
    "                    test_loss, test_hit_rate = cur_metrics[\"root_loss\"], cur_metrics[\"root_hits\"]\n",
    "                    test_positive += test_hit_rate\n",
    "                    test_counts += 1\n",
    "                    counts += len(test_label)\n",
    "                    #sys.stdout.write\n",
    "                    sys.stdout.write('\\rTest: Epoch %d-%d-%d, hit_rate: %.4f, loss: %.4f' %(epo+1, num+1,nn+1,test_hit_rate, test_loss))\n",
    "                del dataset_temp_te\n",
    "                del test_data\n",
    "                del test_labels\n",
    "                gc.collect()\n",
    "            end = time.time()\n",
    "            # mean of per-batch hit rates; same caveat as validation accuracy\n",
    "            test_ans = float(test_positive) / test_counts\n",
    "            m,s = divmod(end-start,60)\n",
    "            h,m = divmod(m,60)\n",
    "            time_str = \"%02d:%02d:%02d\" % (h,m,s)\n",
    "            print('\\nTest: accuracy: %.4f, time: %s,count: %4d' %(test_ans, time_str, counts))\n",
    "            \n",
    "            # checkpoint on a new best validation accuracy; early-stop after 5\n",
    "            # consecutive epochs without improvement\n",
    "            if ans > best_accuracy:\n",
    "                    checkpoint_path = saver.save(sess, save_path)\n",
    "                    print(\"  (Best epoch so far, cum. val. acc increased to %.5f from %.5f. Saving to '%s')\" % (ans, best_accuracy, checkpoint_path))\n",
    "                    best_accuracy = ans\n",
    "                    best_val_acc_epoch = epo\n",
    "            elif epo - best_val_acc_epoch >= 5:\n",
    "                    print(\"Stopping training after %i epochs without improvement on validation accuracy.\" % 5)\n",
    "                    break\n",
    "\n",
    "#             #testing\n",
    "#             test_losses = []\n",
    "#             for num in range(1):\n",
    "#                 read_start = time.time()\n",
    "#                 # set the read offset and length\n",
    "#                 dataset_temp = select(\"testdataset\", num*1000,1000)\n",
    "#                 np.random.shuffle(dataset_temp)\n",
    "#                 test_data_epo_for_shuffled = compiler.build_loom_inputs(dataset_temp)\n",
    "#                 start = time.time()\n",
    "#                 for shuffled in td.epochs(test_data_epo_for_shuffled,1):                    \n",
    "#                     test_loss = train_epoch(num,shuffled)\n",
    "#                     test_losses.append(test_loss)\n",
    "#                     avg_test_loss = sum(test_losses) / len(test_losses)\n",
    "#                     sys.stdout.write('\\rTest: batch %d---%d, batch_loss: %.4f' %(epo+1, num, avg_test_loss))\n",
    "#                 end = time.time()\n",
    "#                 test_data_epo_for_shuffled = None\n",
    "#                 print(\"Test: epo:%4d--%d, time:%f\" % (epo+1,num+1,end-start))\n",
    "            \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restore parameters from a previous run and continue training\n",
    "# NOTE(review): despite the 'training the model' message, this loop only\n",
    "# evaluates on 'validdataset'; no training step is executed.\n",
    "#save_path = 'data/1.04_new_fold_class_model/Zhao.class_model'\n",
    "#checkpoint = 'data/12.25_new_fold_class_model/Zhao.class_model-9'\n",
    "checkpoint = '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-13-20-58_7580'\n",
    "\n",
    "global_epo = 0\n",
    "losses = []\n",
    "with tf.Session() as sess:\n",
    "    print('reading the lastest version...')\n",
    "    saver.restore(sess,checkpoint)\n",
    "    with compiler.multiprocessing_pool():\n",
    "        print('training the model')\n",
    "        for epo in range(30,100):\n",
    "            print(\"Test epoch %4d\" % (epo + 1))\n",
    "            test_positive = 0\n",
    "            test_counts = 0\n",
    "            start = time.time()\n",
    "            for num in range(2):\n",
    "                # set the read offset and length\n",
    "                dataset_temp = select(\"validdataset\", num*1000,1000)   \n",
    "                test_data, test_labels = build_valid_data(dataset_temp)  \n",
    "                for nn, (loop_input, label) in enumerate(get_valid_batch(test_data, test_labels, batch_size)):\n",
    "                    sys.stdout.write('\\rTest: Epoch %d-%d-%d' %(epo+1, num+1,nn))\n",
    "                    cur_test_data = compiler.build_feed_dict(loop_input)\n",
    "                    cur_test_data[keep_prob_ph] = 1.0\n",
    "                    output = sess.run(compiler.output_tensors,cur_test_data)\n",
    "                    # output[0]: loss; output[2]: per-class scores (argmax gives\n",
    "                    # the predicted index); output[1] is unused\n",
    "                    cur_loss, cur_model_output = output[0], output[2]\n",
    "                    cur_max_index = np.argmax(cur_model_output, axis=1)\n",
    "                    correct_num = valid_metric(cur_max_index, label)\n",
    "                    test_positive += sum(correct_num)\n",
    "                    test_counts += len(label)\n",
    "\n",
    "            end = time.time()\n",
    "            test_ans = float(test_positive) / test_counts\n",
    "            m,s = divmod(end-start,60)\n",
    "            h,m = divmod(m,60)\n",
    "            time_str = \"%02d:%02d:%02d\" % (h,m,s)\n",
    "            print('  Test: accuracy: %.4f, time: %s' %(test_ans, time_str))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restore parameters from a previous run and continue training\n",
    "#save_path = 'data/1.04_new_fold_class_model/Zhao.class_model'\n",
    "#checkpoint = 'data/12.25_new_fold_class_model/Zhao.class_model-9'\n",
    "checkpoint = '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-13-20-58_7580'\n",
    "\n",
    "global_epo = 0\n",
    "losses = []\n",
    "with tf.Session() as sess:\n",
    "    print('reading the lastest version...')\n",
    "    saver.restore(sess,checkpoint)\n",
    "    with compiler.multiprocessing_pool():\n",
    "        print('training the model')\n",
    "        for epo in range(30,100):\n",
    "            save_path = \"data/trainedOneModelsWithCondition/model\"+str(epo)+\"/Zhao.class_model\"\n",
    "            for num in range(13):\n",
    "                print('read data_set_%s' % (num+1))\n",
    "                read_start = time.time()\n",
    "                \n",
    "                # set the read offset and length\n",
    "                # NOTE(review): select() is called without a table-name argument\n",
    "                # here, unlike the other cells -- confirm the intended signature.\n",
    "                dataset_temp = select(num*600000,600000)\n",
    "                np.random.shuffle(dataset_temp)\n",
    "                \n",
    "                print('completed read data_set_%s, time: %.4f' % (num+1,time.time()-read_start))\n",
    "                train_data_epo,dev_data_temp,_ = data_input(dataset_temp,1.0,0.0)\n",
    "                \n",
    "                #print('start epo_%d_%d sort' % (epo+1,num+1))\n",
    "                #sort_start = time.time()\n",
    "                #train_data_epo = sorted(train_data_epo,fun)\n",
    "                #print('start epo_%d_%d training, sort time: %.4f' % (epo+1,num+1,time.time()-sort_start)) \n",
    "                \n",
    "                train_data_epo_for_shuffled = compiler.build_loom_inputs(train_data_epo)\n",
    "                              \n",
    "                start = time.time()\n",
    "                \n",
    "                for shuffled in td.epochs(train_data_epo_for_shuffled,1):                    \n",
    "                    # NOTE(review): train_epoch is called with 3 args here but\n",
    "                    # with 4 (epo,num,shuffled,losses) in the main loop -- confirm.\n",
    "                    train_loss = train_epoch(num,shuffled,losses)\n",
    "                      \n",
    "                end = time.time()\n",
    "                train_data_epo = []\n",
    "                dev_data_temp = []\n",
    "                train_data_epo_for_shuffled = None\n",
    "                print('epo:%4d--%d, time:%f' % (epo+1,num+1,end-start))\n",
    "            checkpoint_path = saver.save(sess, save_path, global_step = epo)\n",
    "            print('model saved in file: %s' % checkpoint_path) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load a trained model and run prediction\n",
    "# NOTE(review): dataset_temp is not defined in this cell -- it must be left\n",
    "# over in kernel memory from a previously executed cell; this will fail on a\n",
    "# fresh Restart & Run All.\n",
    "pretrain_path = '/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-11-06-49_35983'\n",
    "sess = tf.InteractiveSession()\n",
    "print('reading the lastest version...')\n",
    "saver.restore(sess,pretrain_path)\n",
    "print('reading completed!')\n",
    "\n",
    "\n",
    "valid_data, valid_labels = build_valid_data(dataset_temp)\n",
    "\n",
    "positive = 0\n",
    "counts = 0\n",
    "for num, (loop_input, label) in enumerate(get_valid_batch(valid_data, valid_labels, batch_size)):\n",
    "    \n",
    "    cur_valid_data = compiler.build_feed_dict(loop_input)\n",
    "    cur_valid_data[keep_prob_ph] = 1.0\n",
    "    output = sess.run(compiler.output_tensors,cur_valid_data)\n",
    "    # output[0]: loss; output[2]: per-class scores (argmax gives prediction)\n",
    "    cur_loss, cur_model_output = output[0], output[2]\n",
    "    cur_max_index = np.argmax(cur_model_output, axis=1)\n",
    "    correct_num = valid_metric(cur_max_index, label)\n",
    "    positive += sum(correct_num)\n",
    "    counts += len(label)\n",
    "\n",
    "ans = float(positive) / counts\n",
    "sys.stdout.write('\\rValid: accuracy: %.4f' %(ans))    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Continues using the InteractiveSession opened in the cell above.\n",
    "# For each 120k-row slice, counts which gold-label tokens were hit at\n",
    "# top-1/2/5/10 and pickles the four Counters per slice.\n",
    "from collections import Counter\n",
    "for i in range(100):\n",
    "    # NOTE(review): select() is called without a table-name argument here,\n",
    "    # unlike the main loop -- confirm the intended signature.\n",
    "    test_data = select(120000*i,120000)\n",
    "    top1,top2,top5,top10 = [],[],[],[]\n",
    "    for batch_test_data in get_batch(test_data,batchsize=256):\n",
    "        # out = (loss, top1_dist, top2_dist, top5_dist, top10_dist)\n",
    "        out = dev_step_2(batch_test_data)\n",
    "        top1.extend(out[1])\n",
    "        top2.extend(out[2])\n",
    "        top5.extend(out[3])\n",
    "        top10.extend(out[4])\n",
    "        \n",
    "    top1 = Counter(top1)\n",
    "    top2 = Counter(top2)\n",
    "    top5 = Counter(top5)\n",
    "    top10 = Counter(top10)\n",
    "    # third positional arg of pickle.dump is the protocol (True == 1)\n",
    "    pickle.dump([top1,top2,top5,top10],open('60wTest/range{}.pkl'.format(i+1),'wb'),True)\n",
    "    print('done ',i+1)\n",
    "    test_data = []\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload one slice's saved top-k hit distributions for inspection.\n",
    "import pickle\n",
    "with open('60wTest/range2.pkl','rb') as pkl_file:\n",
    "    top1, top2, top5, top10 = pickle.load(pkl_file)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Show each of the loaded hit-count distributions in cutoff order.\n",
    "for hit_counter in (top1, top2, top5, top10):\n",
    "    print(hit_counter)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total number of top-1 hits (sum of all Counter values).\n",
    "sum(top1.values())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def dev_step_2(batch,k=10):\n",
    "    # Run the model on one batch; for each cutoff (1,2,5,10) collect the\n",
    "    # gold-label tokens that appeared within the top-k predicted indices.\n",
    "    # Returns (avg_loss, top1_dist, top2_dist, top5_dist, top10_dist).\n",
    "    # NOTE(review): relies on globals sess, compiler, vocab2.\n",
    "    model_output = sess.run(compiler.output_tensors,compiler.build_feed_dict(batch))\n",
    "    # model_output[0]: batch loss; model_output[2]: per-class scores\n",
    "    dev_loss_batch = model_output[0]/float(len(batch))\n",
    "    logits = model_output[2]\n",
    "    # NOTE(review): tf.nn.top_k here appears to add a new op to the graph on\n",
    "    # every call -- consider building it once outside the loop.\n",
    "    top_k = sess.run(tf.nn.top_k(logits,k))\n",
    "    # top_k_index should be a list (per-sample rows of top-k indices)\n",
    "    top_k_index = top_k.indices\n",
    "    # NOTE(review): 'hit' is never used below\n",
    "    hit = [] \n",
    "    top1_distribution = []\n",
    "    top2_distribution = []\n",
    "    top5_distribution = []\n",
    "    top10_distribution = []\n",
    "    for i in range(len(batch)):\n",
    "        if batch[i].gold_label in top_k_index[i][:10]:\n",
    "            top10_distribution.append(vocab2.token(batch[i].gold_label))\n",
    "        if batch[i].gold_label in top_k_index[i][:5]:   \n",
    "            top5_distribution.append(vocab2.token(batch[i].gold_label))\n",
    "        if batch[i].gold_label in top_k_index[i][:2]:   \n",
    "            top2_distribution.append(vocab2.token(batch[i].gold_label))\n",
    "        if batch[i].gold_label in top_k_index[i][:1]:   \n",
    "            top1_distribution.append(vocab2.token(batch[i].gold_label))\n",
    "\n",
    "    return dev_loss_batch,top1_distribution,top2_distribution,top5_distribution,top10_distribution\n",
    "    \n",
    "def dev_top_k(batches,k):\n",
    "    dev_loss_k = 0.0\n",
    "    dev_hits_k = 0.0\n",
    "    num_hits_k = 0.0\n",
    "    for num,batch in enumerate(batches,1):\n",
    "        print('batch_%d, length: %d' % (num,len(batch)))\n",
    "        dev_loss_batch_k, dev_hits_batch_k = dev_step_2(batch,k)\n",
    "        dev_loss_k += sum(dev_loss_batch_k)\n",
    "        num_hits_k += len(dev_loss_batch_k)\n",
    "        dev_hits_k += sum(dev_hits_batch_k)\n",
    "    dev_loss_k_aver = dev_loss_k/float(len(batches))\n",
    "    dev_hits_k_value = dev_hits_k/float(num_hits_k)\n",
    "    print ('dev_loss_avg_%d: %.3e, dev_accuracy_%d: %.2f' \n",
    "    % (k, dev_loss_k_aver, k, dev_hits_k_value))    \n",
    "    \n",
    "    return dev_loss_k,dev_hits_k_value\n",
    "\n",
    "def dev_eval_2(dev_set,k):\n",
    "    batches = []\n",
    "    for i in range(len(dev_set)//batch_size):\n",
    "        start = i*batch_size\n",
    "        end = min(len(dev_set),(i+1)*batch_size)\n",
    "        batches.append(dev_set[start:end])\n",
    "    print 'len dev:',len(batches)\n",
    "    \n",
    "    _,_ = dev_top_k(batches,k)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Compute top-k results on the validation set from the saved checkpoint.\n",
    "checkpoint_path = \"/home/x/mydisk/NewestTreeLSTMData/treelstmV9_mode_best-2019-11-04-11-06-49_35983\"\n",
    "with tf.Session() as sess:\n",
    "    saver.restore(sess,checkpoint_path)\n",
    "    dev_eval_2(dev_data,10)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from collections import Counter\n",
    "# NOTE(review): `indexs` is not defined in any visible cell above --\n",
    "# presumably produced by an earlier (since-deleted?) evaluation cell;\n",
    "# this cell fails on a fresh Restart-and-Run-All.\n",
    "counter = Counter(indexs)\n",
    "print(counter)\n",
    "print(len(counter))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "import matplotlib.pyplot as plt\n",
    "# Bar chart of the index frequency distribution computed above.\n",
    "num = counter.keys()\n",
    "value = counter.values()\n",
    "plt.bar(num,value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def filter_file(path,filter_contents):\n",
    "    filter_file = open(path,'r')\n",
    "    for line in filter_file.readlines():\n",
    "        if line == None:\n",
    "            break\n",
    "        elif line == '':\n",
    "            continue\n",
    "        else:\n",
    "            line = line.replace('\\r','')\n",
    "            line = line.replace('\\n','')\n",
    "            filter_contents[line] = line\n",
    "    filter_file.close()\n",
    "    \n",
    "def vocabSet(path,vocabset):\n",
    "    \"\"\"Populate `vocabset` with one Vocab per file under `path`, keyed\n",
    "    by file name (mutates `vocabset` in place). Prints the file count.\"\"\"\n",
    "    entries = os.listdir(path)\n",
    "    print(len(entries))\n",
    "    # `fname` replaces the original loop variable `dir`, which shadowed\n",
    "    # the builtin; the unused `count` tally and debug comments are gone.\n",
    "    for fname in entries:\n",
    "        vocabset[fname] = Vocab(path + '/' + fname)\n",
    "\n",
    "def get_max_index(tree, max_index):\n",
    "    max = int(max_index)\n",
    "    if int(tree.index) > max:\n",
    "        max = int(tree.index)\n",
    "    for i in xrange(tree.num_children):\n",
    "        max = get_max_index(tree.children[i],max)\n",
    "    return max\n",
    "\n",
    "def get_node(tree, index):\n",
    "    \"\"\"Depth-first search for the node whose .index equals `index`.\n",
    "\n",
    "    Returns the matching node; if no node in the subtree matches, the\n",
    "    fallback below makes it return `tree` itself.\n",
    "    \"\"\"\n",
    "    node = tree\n",
    "    if int(tree.index) == int(index):\n",
    "        return node\n",
    "    else:\n",
    "        for i in xrange(tree.num_children):\n",
    "            node = get_node(tree.children[i], index)\n",
    "            if int(node.index) == int(index):\n",
    "                return node\n",
    "            else:\n",
    "                # no match in this subtree: reset to the current root so\n",
    "                # the final fallback return below is well-defined\n",
    "                node = tree\n",
    "                continue\n",
    "        return node\n",
    "    \n",
    "def create_newTree(root,class_word):\n",
    "    \"\"\"Graft a new node carrying `class_word` onto the tree.\n",
    "\n",
    "    If root.node != -1 it names a parent's index: the new node is\n",
    "    attached under that parent and the (mutated) root is returned.\n",
    "    Otherwise a fresh root is created above the old tree with a 'hole'\n",
    "    placeholder child, and the gold label / node marker migrate from\n",
    "    the old root to the new one. Mutates its argument in both cases.\n",
    "    \"\"\"\n",
    "    newNode = Tree()\n",
    "    # next free index: one past the largest index currently in the tree\n",
    "    max_index = get_max_index(root,0)\n",
    "    newNode.index = max_index + 1\n",
    "    newNode.word = class_word\n",
    "    newNode.gold_label = None\n",
    "    newNode.node = None\n",
    "    if root.node != -1:\n",
    "        parentNode = get_node(root,root.node)\n",
    "        parentNode.add_child(newNode)\n",
    "        return root\n",
    "    else:\n",
    "        # no parent marker: wrap the old tree under the new node,\n",
    "        # pairing it with a 'hole' placeholder sibling\n",
    "        holeNode = Tree()\n",
    "        holeNode.word = 'hole'\n",
    "        holeNode.node = None\n",
    "        holeNode.gold_label = None\n",
    "        holeNode.index = max_index + 2\n",
    "        newNode.add_child(holeNode)\n",
    "        newNode.add_child(root)\n",
    "        newNode.gold_label = root.gold_label\n",
    "        newNode.node = newNode.index\n",
    "        root.gold_label = None\n",
    "        root.node = None\n",
    "        return newNode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the stop-word filter and the per-class method vocabularies, then\n",
    "# point at the class- and method-model checkpoints.\n",
    "filter_contents = {}\n",
    "filter_file('data/filter.txt',filter_contents)\n",
    "print('filter completed!')\n",
    "vocabset = {}\n",
    "vocabSet('data/classVocabulary',vocabset)\n",
    "\n",
    "#checkpoint_path = 'data/method_model_500/treelstm_method_model_500-9'\n",
    "checkpoint_path = 'data/class_model_another_500/treelstm_class_model_more_500-4'\n",
    "checkpoint_path2 = 'data/treelstm_method_model_500-9'\n",
    "# NOTE(review): `data_set` must come from an earlier loading cell -- confirm\n",
    "dev_data = data_set\n",
    "len(dev_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "correct"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def softmax(x):\n",
    "    return np.exp(x)/np.sum(np.exp(x),axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import copy\n",
    "\n",
    "# Hyper-parameters / configuration for the combined prediction run.\n",
    "tf.reset_default_graph()\n",
    "learning_rate = 0.01\n",
    "keep_prob_now = 0.75\n",
    "batch_size = 256\n",
    "epoch = 10\n",
    "num_gpus = 1\n",
    "embedding_learning_rate_factor = 0.1\n",
    "keep_prob_ph = tf.placeholder_with_default(1.0,[])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Merge step 2: predict the class and the concrete method name of input trees\n",
    "def test_prediction2(batch,number):\n",
    "    \"\"\"Two-stage prediction: class model top-5, then method model.\n",
    "\n",
    "    batch  : list of input trees (512 per batch in the driver loop)\n",
    "    number : zero-based batch index, used to offset into the global\n",
    "             `predictions` list (predictions[i+512*number]).\n",
    "    Returns (correct_top1_count, len(batch)).\n",
    "    NOTE(review): relies on globals from other cells: weight_matrix,\n",
    "    keep_prob_now, vocab, vocab2, vocabset, filter_contents, predictions,\n",
    "    checkpoint_path, checkpoint_path2 -- confirm they exist before running.\n",
    "    \"\"\"\n",
    "    print('Create the model...')\n",
    "    \n",
    "    tf.reset_default_graph()\n",
    "    word_embedding = create_embedding(weight_matrix=weight_matrix)\n",
    "    compiler, metrics = create_model(word_embedding=word_embedding,NUM_CLASS=42237,lstm_num_units=150,keep_prob=keep_prob_now,vocab=vocab)\n",
    "\n",
    "    saver = tf.train.Saver()\n",
    "\n",
    "    print('class prediction...')\n",
    "    with tf.Session() as sess:\n",
    "\n",
    "        saver.restore(sess,checkpoint_path)\n",
    "\n",
    "        class_output = sess.run(compiler.output_tensors,compiler.build_feed_dict(batch))\n",
    "        logits = class_output[2]\n",
    "\n",
    "        class_top_5 = sess.run(tf.nn.top_k(logits,5))\n",
    "\n",
    "        # top-5 class indices, shape [batchsize, 5]\n",
    "        class_top_5_indices = class_top_5.indices\n",
    "\n",
    "        output_class = []\n",
    "        batch_newTrees = []\n",
    "        # iterate over the batch dimension\n",
    "        for i in range(len(class_top_5_indices)):\n",
    "            # iterate over the top-5 dimension\n",
    "            words = []\n",
    "            probs = []\n",
    "            newTrees = []\n",
    "            newTree_is_stop = []\n",
    "            for index in class_top_5_indices[i]:\n",
    "                # word and raw score of class `index` for input i\n",
    "                word = vocab2.token(index)\n",
    "                prob = logits[i][index]\n",
    "                words.append(word)\n",
    "                probs.append(prob)\n",
    "\n",
    "                copy_tree = copy.deepcopy(batch[i])\n",
    "                newTree = create_newTree(copy_tree,word)\n",
    "                newTrees.append(newTree)\n",
    "\n",
    "                # words absent from the stop-word filter (and not '--')\n",
    "                # still need the method model; mark them is_stop == 0\n",
    "                if filter_contents.get(word) == None and word != '--':\n",
    "                    newTree_is_stop.append(0)            \n",
    "                else:\n",
    "                    newTree_is_stop.append(1)\n",
    "\n",
    "            # normalise the top-5 raw scores with softmax\n",
    "            probs = softmax(probs)\n",
    "\n",
    "            # output_class holds the class-model output as batchsize*5\n",
    "            # tuples of (newTree, prob, word, is_stop)\n",
    "            output_class.extend(zip(newTrees,probs,words,newTree_is_stop))\n",
    "            batch_newTrees.extend(newTrees)\n",
    "\n",
    "        # class predictions for the whole batch\n",
    "        #print(output_class)\n",
    "\n",
    "        print('prediction completed!')      \n",
    "\n",
    "    tf.reset_default_graph()\n",
    "\n",
    "    print('Create method model')\n",
    "\n",
    "    word_embedding = create_embedding(weight_matrix=weight_matrix)\n",
    "    compiler2, metrics2 = create_model(word_embedding=word_embedding,NUM_CLASS=1611,\n",
    "                                     lstm_num_units=150,keep_prob=keep_prob_now,vocab=vocab)\n",
    "    saver2 = tf.train.Saver()    \n",
    "\n",
    "    print('method predeciton...')\n",
    "    with tf.Session() as sess:\n",
    "\n",
    "        saver2.restore(sess,checkpoint_path2)\n",
    "\n",
    "        correct = 0\n",
    "\n",
    "        # invoke the method model\n",
    "        method_tuple_top_5 = []\n",
    "\n",
    "        # run the method model on all new trees in one batch\n",
    "        method_output = sess.run(compiler2.output_tensors,compiler2.build_feed_dict(batch_newTrees))\n",
    "\n",
    "        # method predictions, shape [n*5, 1611]\n",
    "        method_logits = method_output[2]\n",
    "\n",
    "        # top-5 method probabilities and indices, shape [n*5, 5]\n",
    "        method_top_5 = sess.run(tf.nn.top_k(method_logits,5))\n",
    "        method_top_5_values = sess.run(tf.nn.softmax(method_top_5.values.tolist()))\n",
    "        method_top_5_indices = method_top_5.indices   \n",
    "\n",
    "        #print(method_top_5_values)\n",
    "        #print(method_top_5_indices)\n",
    "\n",
    "        # judge the n*5 tuples and compute the combined probability\n",
    "        batch_method_list = []\n",
    "        for num,(newTree,prob,class_word,is_stop) in enumerate(output_class):\n",
    "            method_list = []\n",
    "            # non-terminal class word: combine class and method probabilities\n",
    "            if is_stop == 0:\n",
    "                methodVocab = vocabset[class_word+'.txt']\n",
    "                method_list_temp = [methodVocab.token(index) for index in method_top_5_indices[num]]\n",
    "\n",
    "                method_prob_temp = [prob*method_prob for method_prob in method_top_5_values[num]]\n",
    "                method_list.extend(zip(method_list_temp,method_prob_temp))\n",
    "                #if i == 1:\n",
    "                #    print(len(method_list_temp))\n",
    "                #    print(len(method_list))\n",
    "\n",
    "            # terminal class word: use the class word and prob directly\n",
    "            else:\n",
    "                method_list.append((class_word,prob))\n",
    "            # per-tuple prediction list, shape [n*5, 5 or 1]\n",
    "            batch_method_list.append(method_list)\n",
    "\n",
    "        ans_method_prob = []\n",
    "        for i in range(len(batch)):\n",
    "            method_prob_each_tree = []\n",
    "            pred_list = batch_method_list[i*5:(i+1)*5]\n",
    "            for each_list in pred_list:\n",
    "                method_prob_each_tree.extend(each_list)\n",
    "            method_list_top_5 = sorted(method_prob_each_tree,key=lambda x:x[1],reverse=True)[:5]\n",
    "            ans_method_prob.append(method_list_top_5)\n",
    "            method_names = [name for name,method_prob in method_list_top_5]\n",
    "            if predictions[i+512*number] in method_names[:1]:\n",
    "                correct+=1\n",
    "                \n",
    "        print('correct: ',correct)\n",
    "        return correct,len(batch)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Merge step 3: predict the class and the method index for input trees\n",
    "def test_prediction3(batch,number):\n",
    "    \"\"\"Two-stage prediction reporting top-1/2/3/4/5/10 hit counts.\n",
    "\n",
    "    Unlike test_prediction2 this scores against batch[i].gold_label using\n",
    "    method vocabulary indices, not decoded names, and `number` is unused\n",
    "    beyond the signature. Returns (correct_dict, len(batch)).\n",
    "    NOTE(review): relies on globals from other cells: weight_matrix,\n",
    "    keep_prob_now, vocab, vocab2, filter_contents, checkpoint_path,\n",
    "    checkpoint_path2 -- confirm they exist before running.\n",
    "    \"\"\"\n",
    "    print('Create the model...')\n",
    "    \n",
    "    tf.reset_default_graph()\n",
    "    word_embedding = create_embedding(weight_matrix=weight_matrix)\n",
    "    compiler, metrics = create_model(word_embedding=word_embedding,NUM_CLASS=42237,lstm_num_units=150,keep_prob=keep_prob_now,vocab=vocab)\n",
    "\n",
    "    saver = tf.train.Saver()\n",
    "\n",
    "    print('class prediction...')\n",
    "    with tf.Session() as sess:\n",
    "\n",
    "        saver.restore(sess,checkpoint_path)\n",
    "\n",
    "        class_output = sess.run(compiler.output_tensors,compiler.build_feed_dict(batch))\n",
    "        logits = class_output[2]\n",
    "\n",
    "        class_top_5 = sess.run(tf.nn.top_k(logits,5))\n",
    "\n",
    "        # top-5 class indices, shape [batchsize, 5]\n",
    "        class_top_5_indices = class_top_5.indices\n",
    "\n",
    "        output_class = []\n",
    "        batch_newTrees = []\n",
    "        # iterate over the batch dimension\n",
    "        for i in range(len(class_top_5_indices)):\n",
    "            # iterate over the top-5 dimension\n",
    "            words = []\n",
    "            nums = []\n",
    "            probs = []\n",
    "            newTrees = []\n",
    "            newTree_is_stop = []\n",
    "            for index in class_top_5_indices[i]:\n",
    "                # word, index and raw score of class `index` for input i\n",
    "                word = vocab2.token(index)\n",
    "                num = index\n",
    "                prob = logits[i][index]\n",
    "                words.append(word)\n",
    "                probs.append(prob)\n",
    "                nums.append(num)\n",
    "\n",
    "                copy_tree = copy.deepcopy(batch[i])\n",
    "                newTree = create_newTree(copy_tree,word)\n",
    "                newTrees.append(newTree)\n",
    "\n",
    "                # words absent from the stop-word filter (and not '--')\n",
    "                # still need the method model; mark them is_stop == 0\n",
    "                if filter_contents.get(word) == None and word != '--':\n",
    "                    newTree_is_stop.append(0)            \n",
    "                else:\n",
    "                    newTree_is_stop.append(1)\n",
    "\n",
    "            # normalise the top-5 raw scores with softmax\n",
    "            probs = softmax(probs)\n",
    "\n",
    "            # output_class holds the class-model output as batchsize*5\n",
    "            # tuples of (newTree, prob, word, num, is_stop)\n",
    "            output_class.extend(zip(newTrees,probs,words,nums,newTree_is_stop))\n",
    "            batch_newTrees.extend(newTrees)\n",
    "\n",
    "        # class predictions for the whole batch\n",
    "        #print(output_class)\n",
    "\n",
    "        print('prediction completed!')      \n",
    "\n",
    "    tf.reset_default_graph()\n",
    "\n",
    "    print('Create method model')\n",
    "\n",
    "    word_embedding = create_embedding(weight_matrix=weight_matrix)\n",
    "    compiler2, metrics2 = create_model(word_embedding=word_embedding,NUM_CLASS=1611,\n",
    "                                     lstm_num_units=150,keep_prob=keep_prob_now,vocab=vocab)\n",
    "    saver2 = tf.train.Saver()    \n",
    "\n",
    "    print('method predeciton...')\n",
    "    with tf.Session() as sess:\n",
    "\n",
    "        saver2.restore(sess,checkpoint_path2)\n",
    "\n",
    "        correct = {'top-1':0,'top-2':0,'top-3':0,'top-4':0,'top-5':0,'top-10':0}\n",
    "\n",
    "        # invoke the method model\n",
    "        method_tuple_top_5 = []\n",
    "\n",
    "        # run the method model on all new trees in one batch\n",
    "        method_output = sess.run(compiler2.output_tensors,compiler2.build_feed_dict(batch_newTrees))\n",
    "\n",
    "        # method predictions, shape [n*5, 1611]\n",
    "        method_logits = method_output[2]\n",
    "\n",
    "        # top-5 method probabilities and indices, shape [n*5, 5]\n",
    "        method_top_5 = sess.run(tf.nn.top_k(method_logits,5))\n",
    "        method_top_5_values = sess.run(tf.nn.softmax(method_top_5.values.tolist()))\n",
    "        method_top_5_indices = method_top_5.indices   \n",
    "\n",
    "        #print(method_top_5_values)\n",
    "        #print(method_top_5_indices)\n",
    "\n",
    "        # judge the n*5 tuples and compute the combined probability\n",
    "        batch_method_list = []\n",
    "        for num,(newTree,prob,class_word,class_num,is_stop) in enumerate(output_class):\n",
    "            method_list = []\n",
    "            # non-terminal class word: combine class and method probabilities\n",
    "            if is_stop == 0:\n",
    "                #methodVocab = vocabset[class_word+'.txt']\n",
    "                method_list_temp = [index for index in method_top_5_indices[num]]  \n",
    "                method_prob_temp = [prob*method_prob for method_prob in method_top_5_values[num]]\n",
    "                method_list.extend(zip(method_list_temp,method_prob_temp))\n",
    "                #if i == 1:\n",
    "                #    print(len(method_list_temp))\n",
    "                #    print(len(method_list))\n",
    "\n",
    "            # terminal class word: use the class index and prob directly\n",
    "            else:\n",
    "                method_list.append((class_num,prob))\n",
    "            # per-tuple prediction list, shape [n*5, 5 or 1]\n",
    "            batch_method_list.append(method_list)\n",
    "\n",
    "        #print(batch_method_list)\n",
    "        ans_method_prob = []\n",
    "        for i in range(len(batch)):\n",
    "            method_prob_each_tree = []\n",
    "            pred_list = batch_method_list[i*5:(i+1)*5]\n",
    "            for each_list in pred_list:\n",
    "                method_prob_each_tree.extend(each_list)\n",
    "            method_list_top_10 = sorted(method_prob_each_tree,key=lambda x:x[1],reverse=True)[:10]\n",
    "            ans_method_prob.append(method_list_top_10)\n",
    "            method_nums = [num for num,method_prob in method_list_top_10]\n",
    "            if batch[i].gold_label in method_nums[:1]:\n",
    "                correct['top-1'] += 1\n",
    "            if batch[i].gold_label in method_nums[:2]:\n",
    "                correct['top-2'] += 1\n",
    "            if batch[i].gold_label in method_nums[:3]:\n",
    "                correct['top-3'] += 1\n",
    "            if batch[i].gold_label in method_nums[:4]:\n",
    "                correct['top-4'] += 1\n",
    "            if batch[i].gold_label in method_nums[:5]:\n",
    "                correct['top-5'] += 1\n",
    "            if batch[i].gold_label in method_nums[:10]:\n",
    "                correct['top-10'] += 1\n",
    "            #print((batch[i].gold_label,method_nums[:1]))\n",
    "                \n",
    "        print('top-1: %d, top-2: %d, top-3: %d, top-4: %d, top-5: %d, top-10: %d' % (correct['top-1'],correct['top-2'],\n",
    "                                                                                     correct['top-3'],correct['top-4'],\n",
    "                                                                                     correct['top-5'],correct['top-10']))\n",
    "        return correct,len(batch)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import print_function\n",
    "# Run the combined class+method evaluation over the whole data set and\n",
    "# report aggregate top-k accuracy percentages.\n",
    "correct_all = {'top-1':0,'top-2':0,'top-3':0,'top-4':0,'top-5':0,'top-10':0}\n",
    "num_all = 0\n",
    "for batch_idx, batch in enumerate(get_batch(data_set,512)):\n",
    "    batch_correct, batch_count = test_prediction3(batch, batch_idx)\n",
    "    # fold this batch's per-k hit counts into the running totals\n",
    "    for key in correct_all:\n",
    "        correct_all[key] += batch_correct[key]\n",
    "    num_all += batch_count\n",
    "\n",
    "print('top-1: %.2f%%, top-2: %.2f%%, top-3: %.2f%%, top-4: %.2f%%, top-5: %.2f%%, top-10: %.2f%%' % \n",
    "      (correct_all['top-1']/float(num_all)*100,correct_all['top-2']/float(num_all)*100,correct_all['top-3']/float(num_all)*100,\n",
    "      correct_all['top-4']/float(num_all)*100,correct_all['top-5']/float(num_all)*100,correct_all['top-10']/float(num_all)*100))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tf1py2",
   "language": "python",
   "name": "tf1py2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
