{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "把之前的训练数据划分为 3 份，每次训练把其中的一份丢弃。训练三个不同的模型。之后再用这些模型来进行融合。\n",
    "\n",
    "参考代码： \n",
    "\n",
    "https://github.com/brightmart/text_classification/blob/master/a02_TextCNN/p7_TextCNN_model.py \n",
    "\n",
    "https://github.com/Qinbf/Tensorflow/blob/master/Tensorflow%E5%9F%BA%E7%A1%80%E4%BD%BF%E7%94%A8%E4%B8%8E%E6%96%87%E6%9C%AC%E5%88%86%E7%B1%BB%E5%BA%94%E7%94%A8/%E7%A8%8B%E5%BA%8F/cnn.ipynb\n",
    "\n",
    "- 每个模型保存到对应的模型位置\n",
    "- 每个模型生成一个 scores 分数矩阵用于后面进行模型融合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false,
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Starting ...\n",
      "removed the existing summary files.\n",
      "Prepared, costed time 6.85836 s.\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt \n",
    "from gensim.models import KeyedVectors\n",
    "import pickle\n",
    "import os\n",
    "import sys\n",
    "import shutil\n",
    "import time\n",
    "\n",
    "\n",
    "time0 = time.time()\n",
    "print('Starting ...')\n",
    "model_name = 'textcnn-fc-drop-title-content-256-345-cross3'                    # model name\n",
    "W_embedding = np.load('../data/W_embedding.npy')            # pre-trained word embeddings\n",
    "\n",
    "summary_path = '../summary/' + model_name + '/'             # summary directory\n",
    "if os.path.exists(summary_path):   # remove old summary files to avoid mixing runs\n",
    "    print('removed the existing summary files.')\n",
    "    shutil.rmtree(summary_path)\n",
    "os.makedirs(summary_path)          # then recreate the directory\n",
    "    \n",
    "# ##################### config ######################\n",
    "n_step1 = max_len1 = 50                   # title sequence length\n",
    "n_step2= max_len2 = 150                   # content sequence length\n",
    "input_size = embedding_size = 256       # embedding dimension\n",
    "n_class = 1999                          # total number of classes\n",
    "filter_sizes = [3,4,5]                  # convolution filter sizes\n",
    "n_filter = 256                          # number of filters per size\n",
    "fc_hidden_size = 1024                   # hidden units of the fc layer\n",
    "n_filter_total = n_filter * len(filter_sizes)   # feature width after concatenating all filter sizes\n",
    "summary_step = 0                        # counter used as the x-axis for summaries\n",
    "global_step = 0                         # training-iteration counter (fed to the BN layers)\n",
    "valid_num = 100000                      # number of samples held out for validation\n",
    "seed_num = 13                           # RNG seed for the train/valid split\n",
    "tr_batch_size = 128                     # training batch size\n",
    "te_batch_size = 128                     # evaluation batch size\n",
    "print('Prepared, costed time %g s.' % (time.time() - time0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false,
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Building model ...\n",
      "Finished creating the TextCNN model.\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "config = tf.ConfigProto()\n",
    "config.gpu_options.allow_growth = True   # grow GPU memory on demand instead of grabbing it all\n",
    "sess = tf.Session(config=config)\n",
    "from tensorflow.contrib import rnn\n",
    "import tensorflow.contrib.layers as layers\n",
    "\n",
    "'''\n",
    "TextCNN for Zhihu multi-label question classification.\n",
    "(The original note said \"bidirectional GRU\", but the model built below is a TextCNN.)\n",
    "'''\n",
    "print('Building model ...')\n",
    "lr = tf.placeholder(tf.float32)             # learning rate\n",
    "keep_prob = tf.placeholder(tf.float32, [])  # dropout keep probability\n",
    "batch_size = tf.placeholder(tf.int32, [])  # NOTE: the dtype must be tf.int32\n",
    "tst = tf.placeholder(tf.bool)              # True at eval time: BN then uses the moving averages\n",
    "n_updates = tf.placeholder(tf.int32)      # training iteration, fed to the BN layers\n",
    "update_emas = list()   # all moving-average update ops collected from the BN layers\n",
    "\n",
    "\n",
    "def weight_variable(shape, name):\n",
    "    \"\"\"Create a weight variable initialized from a truncated normal (stddev=0.1).\"\"\"\n",
    "    initial = tf.truncated_normal(shape, stddev=0.1)\n",
    "    return tf.Variable(initial, name=name)\n",
    "\n",
    "def bias_variable(shape, name):\n",
    "    \"\"\"Create a bias variable initialized to the constant 0.1.\"\"\"\n",
    "    initial = tf.constant(0.1, shape=shape)\n",
    "    return tf.Variable(initial, name=name)\n",
    "\n",
    "def batchnorm(Ylogits, is_test, num_updates, offset, convolutional=False):\n",
    "    \"\"\"Batch normalization.\n",
    "    Args:\n",
    "        Ylogits: activations to normalize; a 2D tensor, or a 4D conv output\n",
    "            when convolutional=True.\n",
    "        is_test: bool tensor; if True use the exponential moving averages\n",
    "            (for evaluation), otherwise use the current batch statistics.\n",
    "        num_updates: the training global_step; passing it makes the moving\n",
    "            average bias-corrected in early iterations.\n",
    "        offset: beta, the learned shift; typically initialized to 0.1 before ReLU.\n",
    "        convolutional: if True, compute moments over batch and both spatial dims.\n",
    "    Returns:\n",
    "        Ybn: same shape as Ylogits, the normalized result (no scale term:\n",
    "            gamma is of little use with ReLU activations).\n",
    "        update_moving_everages: op updating the moving mean/variance; must be\n",
    "            run during training so evaluation (is_test=True) works.\n",
    "    \"\"\"\n",
    "    exp_moving_avg = tf.train.ExponentialMovingAverage(0.999, num_updates) # adding the iteration prevents from averaging across non-existing iterations\n",
    "    bnepsilon = 1e-5   # small constant to avoid division by zero\n",
    "    if convolutional:\n",
    "        mean, variance = tf.nn.moments(Ylogits, [0, 1, 2])\n",
    "    else:\n",
    "        mean, variance = tf.nn.moments(Ylogits, [0])\n",
    "    update_moving_everages = exp_moving_avg.apply([mean, variance])\n",
    "    m = tf.cond(is_test, lambda: exp_moving_avg.average(mean), lambda: mean)\n",
    "    v = tf.cond(is_test, lambda: exp_moving_avg.average(variance), lambda: variance)\n",
    "    Ybn = tf.nn.batch_normalization(Ylogits, m, v, offset, None, bnepsilon)\n",
    "    return Ybn, update_moving_everages\n",
    "\n",
    "\n",
    "with tf.name_scope('Inputs'):\n",
    "    X1_inputs = tf.placeholder(tf.int64, [None, n_step1], name='X1_input')   # title word ids\n",
    "    X2_inputs = tf.placeholder(tf.int64, [None, n_step2], name='X2_input')   # content word ids\n",
    "    y_inputs = tf.placeholder(tf.float32, [None, n_class], name='y_input')   # per-class label indicators (multi-label)\n",
    "\n",
    "with tf.name_scope('embedding_layer'):\n",
    "    embedding = tf.get_variable(name=\"W_embedding\", shape=W_embedding.shape, \n",
    "                        initializer=tf.constant_initializer(W_embedding), trainable=True)   # fine-tune the pre-trained embeddings\n",
    "\n",
    "def textcnn(X_inputs, n_step):\n",
    "    \"\"\"Build the TextCNN feature extractor for one text field.\n",
    "\n",
    "    Args:\n",
    "        X_inputs: int tensor of word ids, shape [batch_size, n_step].\n",
    "        n_step: sequence length of X_inputs (height of the conv input).\n",
    "    Returns:\n",
    "        h_pool_flat: float tensor of shape [batch_size, n_filter_total] —\n",
    "        the concatenated max-pooled features of all filter sizes.\n",
    "    \"\"\"\n",
    "    # X_inputs.shape = [batchsize, n_step]  ->  inputs.shape = [batchsize, n_step, embedding_size]\n",
    "    inputs = tf.nn.embedding_lookup(embedding, X_inputs)  \n",
    "    inputs = tf.expand_dims(inputs, -1)   # add a channel dim for conv2d\n",
    "    pooled_outputs = list()\n",
    "    for i, filter_size in enumerate(filter_sizes):\n",
    "        with tf.name_scope(\"conv-maxpool-%s\" % filter_size):\n",
    "            # Convolution Layer\n",
    "            filter_shape = [filter_size, embedding_size, 1, n_filter]\n",
    "            W_filter = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W_filter\")\n",
    "            # BUGFIX: name= belongs to tf.Variable, not tf.constant; the\n",
    "            # variable was previously created without a name.\n",
    "            beta = tf.Variable(tf.constant(0.1, tf.float32, shape=[n_filter]), name=\"beta\")\n",
    "            tf.summary.histogram('beta', beta)\n",
    "            conv = tf.nn.conv2d(inputs, W_filter, strides=[1, 1, 1, 1], padding=\"VALID\", name=\"conv\")\n",
    "            conv_bn, update_ema = batchnorm(conv, tst, n_updates, beta, convolutional=True)    # BN before the activation\n",
    "            # Apply nonlinearity; batch-norm scaling is not useful with relus,\n",
    "            # and the batch-norm offset replaces a conv bias.\n",
    "            h = tf.nn.relu(conv_bn, name=\"relu\")\n",
    "            # Maxpooling over the outputs\n",
    "            pooled = tf.nn.max_pool(h,ksize=[1, n_step - filter_size + 1, 1, 1],\n",
    "                strides=[1, 1, 1, 1],padding='VALID',name=\"pool\")\n",
    "            pooled_outputs.append(pooled)\n",
    "            update_emas.append(update_ema)\n",
    "    h_pool = tf.concat(pooled_outputs, 3)\n",
    "    h_pool_flat = tf.reshape(h_pool, [-1, n_filter_total]) \n",
    "    return h_pool_flat    # shape = [-1, n_filter_total]\n",
    "    \n",
    "    \n",
    "with tf.name_scope('cnn-title'):\n",
    "    output_title = textcnn(X1_inputs, n_step1)\n",
    "with tf.name_scope('cnn-content'):\n",
    "    output_content = textcnn(X2_inputs, n_step2)\n",
    "with tf.name_scope('fc-bn-layer'):\n",
    "    # fuse title and content features, then FC + BN + ReLU + dropout\n",
    "    output = tf.concat([output_title, output_content], axis=1)\n",
    "    W_fc = weight_variable([n_filter_total*2, fc_hidden_size], name='Weight_fc')\n",
    "    tf.summary.histogram('W_fc', W_fc)\n",
    "    h_fc = tf.matmul(output, W_fc, name='h_fc')\n",
    "    # BUGFIX: name= belongs to tf.Variable, not tf.constant; the variable\n",
    "    # was previously created without a name.\n",
    "    beta_fc = tf.Variable(tf.constant(0.1, tf.float32, shape=[fc_hidden_size]), name=\"beta_fc\")\n",
    "    tf.summary.histogram('beta_fc', beta_fc)\n",
    "    fc_bn, update_ema_fc = batchnorm(h_fc, tst, n_updates, beta_fc, convolutional=False)\n",
    "    update_emas.append(update_ema_fc)\n",
    "    fc_bn_relu = tf.nn.relu(fc_bn, name=\"relu\")\n",
    "    fc_bn_drop = tf.nn.dropout(fc_bn_relu, keep_prob)\n",
    "\n",
    "with tf.name_scope('out_layer'):\n",
    "    W_out = weight_variable([fc_hidden_size, n_class], name='Weight_out') \n",
    "    tf.summary.histogram('Weight_out', W_out)\n",
    "    b_out = bias_variable([n_class], name='bias_out') \n",
    "    tf.summary.histogram('bias_out', b_out)\n",
    "    y_pred = tf.nn.xw_plus_b(fc_bn_drop, W_out, b_out, name='y_pred')  # per-class scores (logits)\n",
    "    \n",
    "with tf.name_scope('cost'):\n",
    "    # multi-label classification: sigmoid cross-entropy per class\n",
    "    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_pred, labels=y_inputs))\n",
    "    tf.summary.scalar('cost', cost)\n",
    "\n",
    "# optimizer\n",
    "with tf.name_scope('AdamOptimizer'):\n",
    "    optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n",
    "    train_op = optimizer.minimize(cost)\n",
    "    update_op = tf.group(*update_emas)   # update the BN moving averages\n",
    "\n",
    "# summary\n",
    "merged = tf.summary.merge_all() # summary\n",
    "train_writer = tf.summary.FileWriter(summary_path + 'train', sess.graph)\n",
    "test_writer = tf.summary.FileWriter(summary_path + 'test')\n",
    "# function-call print for consistency with the rest of the notebook\n",
    "print('Finished creating the TextCNN model.')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading data...\n",
      "finished loading data, time cost 61.1376 s\n",
      "('X_train.shape=', (2899952, 200), 'y_train.shape=', (2899952,))\n",
      "('X_valid.shape=', (100000, 200), 'y_valid.shape=', (100000,))\n",
      "Finised loading data, time 76.744 s\n"
     ]
    }
   ],
   "source": [
    "sys.path.append('..')\n",
    "from data_helpers import BatchGenerator\n",
    "from data_helpers import to_categorical\n",
    "from evaluator import score_eval\n",
    "\n",
    "save_path = '../data/'\n",
    "print('loading data...')\n",
    "time0 = time.time()\n",
    "X_title = np.load(save_path+'X_tr_title_50.npy')\n",
    "X_content = np.load(save_path+'X_tr_content_150.npy')\n",
    "X = np.hstack([X_title, X_content])   # [title ids | content ids] per sample\n",
    "y = np.load(save_path+'y_tr.npy')\n",
    "print('finished loading data, time cost %g s' % (time.time() - time0))\n",
    "# split off the validation set\n",
    "sample_num = X.shape[0]\n",
    "np.random.seed(seed_num)   # fixed seed so the split is reproducible\n",
    "new_index = np.random.permutation(sample_num)\n",
    "\n",
    "X = X[new_index]\n",
    "y = y[new_index]\n",
    "X_valid = X[:valid_num]\n",
    "y_valid = y[:valid_num]\n",
    "X_train = X[valid_num:]\n",
    "y_train = y[valid_num:]   \n",
    "np.random.seed(10)\n",
    "new_tr_index = np.random.permutation(len(X_train)) # shuffle the training set again\n",
    "X_train = X_train[new_tr_index]\n",
    "y_train = y_train[new_tr_index]\n",
    "# build the batch generators\n",
    "data_valid = BatchGenerator(X_valid, y_valid, shuffle=False)\n",
    "print('X_train.shape=', X_train.shape, 'y_train.shape=', y_train.shape)\n",
    "print('X_valid.shape=', data_valid.X.shape, 'y_valid.shape=', data_valid.y.shape)\n",
    "\n",
    "# validation data and evaluation helpers\n",
    "marked_labels_list = data_valid.y.tolist() # ground-truth labels of the whole valid set\n",
    "valid_data_size = data_valid.y.shape[0]\n",
    "def valid_epoch():\n",
    "    \"\"\"Run one full pass over the validation set.\n",
    "\n",
    "    Returns:\n",
    "        (mean_cost, precision, recall, f1) computed over the whole valid set.\n",
    "    \"\"\"\n",
    "    data_valid._index_in_epoch = 0  # rewind the generator to the first sample\n",
    "    # BUGFIX: use the evaluation batch size; tr_batch_size was used before\n",
    "    # (same value, 128, so behavior is unchanged), leaving te_batch_size unused.\n",
    "    _batch_size = te_batch_size\n",
    "    fetches = [cost, y_pred]   \n",
    "    batch_num = int(valid_data_size / _batch_size)\n",
    "    _costs = 0.0\n",
    "    predict_labels_list = list()  # all predictions\n",
    "    for i in xrange(batch_num):\n",
    "        X_batch, y_batch = data_valid.next_batch(_batch_size)\n",
    "        X1_batch = X_batch[:, :n_step1]\n",
    "        X2_batch = X_batch[:, n_step1:]\n",
    "        y_batch = to_categorical(y_batch)\n",
    "        feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch,  y_inputs:y_batch, lr:1e-5,\n",
    "                     batch_size:_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "        _cost, predict_labels = sess.run(fetches, feed_dict)\n",
    "        _costs += _cost    \n",
    "        predict_labels = map(lambda label: label.argsort()[-1:-6:-1], predict_labels) # top-5 label ids\n",
    "        predict_labels_list.extend(predict_labels)\n",
    "    predict_label_and_marked_label_list = zip(predict_labels_list, marked_labels_list)\n",
    "    precision, recall, f1 = score_eval(predict_label_and_marked_label_list)\n",
    "    mean_cost = _costs / batch_num\n",
    "    return mean_cost, precision, recall, f1\n",
    "\n",
    "print('Finished loading data, time %g s' % (time.time() - time0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##  模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "decay = 0.8             # per-epoch learning-rate decay factor\n",
    "valid_step = 6000        # run a validation pass every valid_step batches\n",
    "max_epoch = 1\n",
    "max_max_epoch = 6       # maximum number of epochs\n",
    "save_epoch = 1           # save a checkpoint every save_epoch epochs\n",
    "saver = tf.train.Saver(max_to_keep=18)           # max number of checkpoints to keep\n",
    "sample_num = len(X_train)         # total number of training samples\n",
    "K = 3   # split the training data into 3 folds\n",
    "split_size = int(sample_num / K)  # size of each fold  \n",
    "summary_step = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def train(valid_k, max_max_epoch=7):\n",
    "    \"\"\"Train one cross-validation model from scratch.\n",
    "\n",
    "    The valid_k-th of the K data folds is held out; the remaining folds\n",
    "    form the training set.  valid_k takes values 0,1,...,K-1.\n",
    "    \"\"\"\n",
    "    global summary_step   # assigned inside the function, so it must be declared global\n",
    "    global global_step\n",
    "    global_step = 0\n",
    "    te_start = valid_k*split_size\n",
    "    te_end = min(te_start+split_size, sample_num)\n",
    "    print('***Begin training, valid_k=%d, [%d,%d]' % (valid_k, te_start, te_end))\n",
    "    tr_indexs = range(0,te_start) + range(te_end, sample_num)   # indices of the training samples\n",
    "    data_train = BatchGenerator(X_train[tr_indexs], y_train[tr_indexs],shuffle=True)\n",
    "    model_path = '../ckpt/' + model_name    # where the checkpoints are saved\n",
    "    if not os.path.exists(model_path):\n",
    "        os.makedirs(model_path)         \n",
    "    model_path = model_path + '/' + 'cross' + str(valid_k) + '.ckpt'\n",
    "    tr_batch_num = int(data_train.y.shape[0] / tr_batch_size)  # batches per epoch\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    _lr = 5e-4\n",
    "    for epoch in xrange(max_max_epoch):\n",
    "        if (epoch >= max_epoch):\n",
    "            _lr = _lr * decay\n",
    "        print('EPOCH %d， lr=%g' % (epoch+1, _lr))\n",
    "        time0 = time.time()\n",
    "        _costs = 0.0\n",
    "        for batch in xrange(tr_batch_num): \n",
    "            global_step += 1\n",
    "            summary_step += 1\n",
    "            if (batch+1) % valid_step == 0:    # periodic validation\n",
    "                valid_cost, precision, recall, f1 = valid_epoch()\n",
    "                print('Global_step=%d: valid cost=%g; p=%g, r=%g, f1=%g ' % (\n",
    "                        global_step, valid_cost, precision, recall, f1))\n",
    "                if (epoch >= max_epoch):\n",
    "                    _lr = _lr * decay\n",
    "            X_batch, y_batch = data_train.next_batch(tr_batch_size)\n",
    "            X1_batch = X_batch[:, :n_step1]\n",
    "            X2_batch = X_batch[:, n_step1:]\n",
    "            y_batch = to_categorical(y_batch)\n",
    "            feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch,  y_inputs:y_batch, lr:_lr,\n",
    "                         batch_size:tr_batch_size, keep_prob:0.5, tst:False, n_updates:global_step}\n",
    "            fetches = [merged, cost, train_op, update_op]\n",
    "            summary, _cost, _, _ = sess.run(fetches, feed_dict) # the cost is the mean cost of one batch\n",
    "            _costs += _cost\n",
    "            # BUGFIX: was `if global_step % 100:`, which fired on 99 of every\n",
    "            # 100 steps; the intent is to write summaries once every 100 steps.\n",
    "            if global_step % 100 == 0:\n",
    "                train_writer.add_summary(summary, summary_step)\n",
    "                X_batch, y_batch = data_valid.next_batch(tr_batch_size)\n",
    "                X1_batch = X_batch[:, :n_step1]\n",
    "                X2_batch = X_batch[:, n_step1:]\n",
    "                y_batch = to_categorical(y_batch)\n",
    "                feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch,  y_inputs:y_batch, lr:1e-5,\n",
    "                             batch_size:tr_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "                fetches = [merged, cost]\n",
    "                summary, _cost = sess.run(fetches, feed_dict)\n",
    "                test_writer.add_summary(summary, summary_step)\n",
    "        valid_cost, precision, recall, f1 = valid_epoch()  # full validation at the end of each epoch\n",
    "        mean_cost = _costs / tr_batch_num\n",
    "        print('Global_step=%d. Training cost=%g; Valid cost=%g; p=%g, r=%g, f1=%g; Speed=%g s/epoch' % (\n",
    "            global_step, mean_cost, valid_cost, precision, recall, f1, time.time()-time0) )\n",
    "        if (epoch + 1) % save_epoch == 0:  # save a checkpoint every save_epoch epochs\n",
    "            model_save_path = saver.save(sess, model_path, global_step=(epoch+1))\n",
    "            print('CKPT path is %s ' % model_save_path) \n",
    "    print('Finished training, valid_k=%d' % valid_k)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "MODEL 0/2\n",
      "***Begin training, valid_k=0, [0,966650]\n",
      "EPOCH 1， lr=0.0005\n",
      "Global_step=6000: valid cost=0.00508873; p=1.16231, r=0.476933, f1=0.338171 \n",
      "Globel_step=7551. Training cost=0.00901025; Valid cost=0.00490458; p=1.21397, r=0.496749, f1=0.352506; Speed=2800.03 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-1 \n",
      "EPOCH 2， lr=0.0004\n",
      "Global_step=13551: valid cost=0.00446955; p=1.31376, r=0.536772, f1=0.381074 \n",
      "Globel_step=15102. Training cost=0.00435597; Valid cost=0.00432388; p=1.32351, r=0.541196, f1=0.384123; Speed=2721.76 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-2 \n",
      "EPOCH 3， lr=0.000256\n",
      "Global_step=21102: valid cost=0.00441031; p=1.34665, r=0.550044, f1=0.390531 \n",
      "Globel_step=22653. Training cost=0.00395624; Valid cost=0.00421323; p=1.35773, r=0.554391, f1=0.393654; Speed=2558.2 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-3 \n",
      "EPOCH 4， lr=0.00016384\n",
      "Global_step=28653: valid cost=0.00412306; p=1.37254, r=0.560374, f1=0.397915 \n",
      "Globel_step=30204. Training cost=0.00374528; Valid cost=0.0041173; p=1.37712, r=0.561795, f1=0.399017; Speed=2615.53 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-4 \n",
      "EPOCH 5， lr=0.000104858\n",
      "Global_step=36204: valid cost=0.00412168; p=1.38154, r=0.563663, f1=0.40033 \n",
      "Globel_step=37755. Training cost=0.00360567; Valid cost=0.00403413; p=1.38489, r=0.564733, f1=0.401151; Speed=2753.74 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-5 \n",
      "EPOCH 6， lr=6.71089e-05\n",
      "Global_step=43755: valid cost=0.00406135; p=1.38792, r=0.565753, f1=0.40192 \n",
      "Globel_step=45306. Training cost=0.00351011; Valid cost=0.00403459; p=1.38882, r=0.566057, f1=0.402149; Speed=2781.49 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-6 \n",
      "Finished training, valid_k=0\n",
      "MODEL 1/2\n",
      "***Begin training, valid_k=1, [966650,1933300]\n",
      "EPOCH 1， lr=0.0005\n",
      "Global_step=6000: valid cost=0.0049978; p=1.15896, r=0.474822, f1=0.336825 \n",
      "Globel_step=7551. Training cost=0.00903488; Valid cost=0.00482004; p=1.20771, r=0.494964, f1=0.351079; Speed=2665.04 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross1.ckpt-1 \n",
      "EPOCH 2， lr=0.0004\n",
      "Global_step=13551: valid cost=0.00458251; p=1.30049, r=0.531761, f1=0.377432 \n",
      "Globel_step=15102. Training cost=0.00435584; Valid cost=0.00456428; p=1.31587, r=0.538433, f1=0.382089; Speed=2855.93 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross1.ckpt-2 \n",
      "EPOCH 3， lr=0.000256\n",
      "Global_step=21102: valid cost=0.00437747; p=1.35012, r=0.551354, f1=0.391483 \n",
      "Globel_step=22653. Training cost=0.00395528; Valid cost=0.00421347; p=1.35955, r=0.555269, f1=0.394249; Speed=2772.01 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross1.ckpt-3 \n",
      "EPOCH 4， lr=0.00016384\n",
      "Global_step=28653: valid cost=0.00414691; p=1.37023, r=0.559436, f1=0.397248 \n",
      "Globel_step=30204. Training cost=0.00374508; Valid cost=0.00412755; p=1.37821, r=0.562438, f1=0.399433; Speed=2893.95 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross1.ckpt-4 \n",
      "EPOCH 5， lr=0.000104858\n",
      "Global_step=36204: valid cost=0.00409543; p=1.38084, r=0.563264, f1=0.40007 \n",
      "Globel_step=37755. Training cost=0.00360519; Valid cost=0.00406943; p=1.38322, r=0.564391, f1=0.400838; Speed=2640.49 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross1.ckpt-5 \n",
      "EPOCH 6， lr=6.71089e-05\n",
      "Global_step=43755: valid cost=0.00401666; p=1.38693, r=0.565427, f1=0.401672 \n",
      "Globel_step=45306. Training cost=0.00351253; Valid cost=0.00402472; p=1.38857, r=0.565915, f1=0.402056; Speed=2667.95 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross1.ckpt-6 \n",
      "Finished training, valid_k=1\n",
      "MODEL 2/2\n",
      "***Begin training, valid_k=2, [1933300,2899950]\n",
      "EPOCH 1， lr=0.0005\n",
      "Global_step=6000: valid cost=0.00492233; p=1.16693, r=0.478329, f1=0.339264 \n",
      "Globel_step=7551. Training cost=0.00895681; Valid cost=0.00475339; p=1.21713, r=0.498124, f1=0.353465; Speed=2455.73 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross2.ckpt-1 \n",
      "EPOCH 2， lr=0.0004\n",
      "Global_step=13551: valid cost=0.00467295; p=1.29588, r=0.53051, f1=0.376413 \n",
      "Globel_step=15102. Training cost=0.0043525; Valid cost=0.00441157; p=1.32648, r=0.541714, f1=0.384635; Speed=2647.26 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross2.ckpt-2 \n",
      "EPOCH 3， lr=0.000256\n",
      "Global_step=21102: valid cost=0.00434059; p=1.35511, r=0.553633, f1=0.393051 \n",
      "Globel_step=22653. Training cost=0.00395446; Valid cost=0.00425517; p=1.35934, r=0.55463, f1=0.393909; Speed=2801.89 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross2.ckpt-3 \n",
      "EPOCH 4， lr=0.00016384\n",
      "Global_step=28653: valid cost=0.00414575; p=1.37058, r=0.559577, f1=0.397349 \n",
      "Globel_step=30204. Training cost=0.00374111; Valid cost=0.00408303; p=1.37789, r=0.562138, f1=0.399254; Speed=3108.56 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross2.ckpt-4 \n",
      "EPOCH 5， lr=0.000104858\n",
      "Global_step=36204: valid cost=0.00409132; p=1.38129, r=0.563547, f1=0.400251 \n",
      "Globel_step=37755. Training cost=0.00360398; Valid cost=0.00400963; p=1.38602, r=0.565029, f1=0.401395; Speed=2801.23 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross2.ckpt-5 \n",
      "EPOCH 6， lr=6.71089e-05\n",
      "Global_step=43755: valid cost=0.00407379; p=1.38619, r=0.565132, f1=0.401462 \n",
      "Globel_step=45306. Training cost=0.00351022; Valid cost=0.00403858; p=1.38803, r=0.565963, f1=0.402034; Speed=3006.45 s/epoch\n",
      "CKPT path is ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross2.ckpt-6 \n",
      "Finished training, valid_k=2\n"
     ]
    }
   ],
   "source": [
    "# train one model per fold: fold valid_k is held out, the rest is trained on\n",
    "for valid_k in xrange(K):\n",
    "    print('MODEL %d/%d' % (valid_k,K-1))\n",
    "    train(valid_k, max_max_epoch=6)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 没有收敛，继续训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "valid_step = 6000        # run a validation pass every valid_step batches (duplicate assignment removed)\n",
    "saver = tf.train.Saver(max_to_keep=45)           # max number of checkpoints to keep\n",
    "sample_num = len(X_train)         # total number of training samples\n",
    "K = 3   # split the training data into 3 folds\n",
    "split_size = int(sample_num / K)  # size of each fold  \n",
    "summary_step = 0\n",
    "global_step = 0\n",
    "decay = 0.75   # learning-rate decay factor for the continued run\n",
    "\n",
    "def add_train(valid_k, add_epoch=3, pre_model_num=6):\n",
    "    \"\"\"Continue training the valid_k-th cross-validation model.\n",
    "\n",
    "    Restores checkpoint number pre_model_num and trains for add_epoch more\n",
    "    epochs, saving a new checkpoint whenever the valid F1 improves.\n",
    "    valid_k takes values 0,1,...,K-1.\n",
    "    \"\"\"\n",
    "    global summary_step   # assigned inside the function, so it must be declared global\n",
    "    global global_step\n",
    "    global_step = 45306\n",
    "    te_start = valid_k*split_size\n",
    "    te_end = min(te_start+split_size, sample_num)\n",
    "    print('***Begin training, valid_k=%d, [%d,%d]' % (valid_k, te_start, te_end))\n",
    "    tr_indexs = range(0,te_start) + range(te_end, sample_num)   # indices of the training samples\n",
    "    data_train = BatchGenerator(X_train[tr_indexs], y_train[tr_indexs],shuffle=True)\n",
    "    model_path = '../ckpt/' + model_name    # where the checkpoints are saved\n",
    "    if not os.path.exists(model_path):\n",
    "        os.makedirs(model_path)         \n",
    "    model_path = model_path + '/' + 'cross' + str(valid_k) + '.ckpt'\n",
    "    tr_batch_num = int(data_train.y.shape[0] / tr_batch_size)  # batches per epoch\n",
    "    # restore the previously trained model\n",
    "    # BUGFIX: was hard-coded to 6, silently ignoring the pre_model_num parameter\n",
    "    model_num = pre_model_num\n",
    "    best_model_path = model_path + '-' + str(model_num)  # restore the best checkpoint\n",
    "    saver.restore(sess, best_model_path)\n",
    "    last_f1 = 0.402\n",
    "    \n",
    "    print('Finished loading model.')\n",
    "    _lr = 1e-4\n",
    "    for epoch in xrange(add_epoch):\n",
    "        _lr = _lr * decay\n",
    "        print('EPOCH %d， lr=%g' % (epoch+1, _lr))\n",
    "        time0 = time.time()\n",
    "        _costs = 0.0\n",
    "        for batch in xrange(tr_batch_num): \n",
    "            global_step += 1\n",
    "            summary_step += 1\n",
    "            if (batch+1) % valid_step == 0:    # periodic validation\n",
    "                valid_cost, precision, recall, f1 = valid_epoch()\n",
    "                print('Global_step=%d: valid cost=%g; p=%g, r=%g, f1=%g ' % (\n",
    "                        global_step, valid_cost, precision, recall, f1))\n",
    "                if (f1 > last_f1):\n",
    "                    last_f1 = f1\n",
    "                    model_num += 1\n",
    "                    model_save_path = saver.save(sess, model_path, global_step=model_num)\n",
    "                    print('the save path is ', model_save_path) \n",
    "                _lr = _lr * decay\n",
    "                print('===>_lr=%g' % _lr)\n",
    "            X_batch, y_batch = data_train.next_batch(tr_batch_size)\n",
    "            X1_batch = X_batch[:, :n_step1]\n",
    "            X2_batch = X_batch[:, n_step1:]\n",
    "            y_batch = to_categorical(y_batch)\n",
    "            feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch,  y_inputs:y_batch, lr:_lr,\n",
    "                         batch_size:tr_batch_size, keep_prob:0.5, tst:False, n_updates:global_step}\n",
    "            fetches = [merged, cost, train_op, update_op]\n",
    "            summary, _cost, _, _ = sess.run(fetches, feed_dict) # the cost is the mean cost of one batch\n",
    "            _costs += _cost\n",
    "            # BUGFIX: was `if global_step % 100:`, which fired on 99 of every\n",
    "            # 100 steps; the intent is to write summaries once every 100 steps.\n",
    "            if global_step % 100 == 0:\n",
    "                train_writer.add_summary(summary, summary_step)\n",
    "                X_batch, y_batch = data_valid.next_batch(tr_batch_size)\n",
    "                X1_batch = X_batch[:, :n_step1]\n",
    "                X2_batch = X_batch[:, n_step1:]\n",
    "                y_batch = to_categorical(y_batch)\n",
    "                feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch,  y_inputs:y_batch, lr:1e-5,\n",
    "                             batch_size:tr_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "                fetches = [merged, cost]\n",
    "                summary, _cost = sess.run(fetches, feed_dict)\n",
    "                test_writer.add_summary(summary, summary_step)\n",
    "        valid_cost, precision, recall, f1 = valid_epoch()  # full validation at the end of each epoch\n",
    "        mean_cost = _costs / tr_batch_num\n",
    "        print('Global_step=%d. Training cost=%g; Valid cost=%g; p=%g, r=%g, f1=%g; Speed=%g s/epoch' % (\n",
    "            global_step, mean_cost, valid_cost, precision, recall, f1, time.time()-time0) )\n",
    "        if (f1 > last_f1):\n",
    "            last_f1 = f1\n",
    "            model_num += 1\n",
    "            model_save_path = saver.save(sess, model_path, global_step=model_num)\n",
    "            print('the save path is ', model_save_path) \n",
    "    print('Finished training, valid_k=%d' % valid_k)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "MODEL 0/2\n",
      "***Begin training, valid_k=0, [0,966650]\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-6\n",
      "Finished loading model.\n",
      "EPOCH 1， lr=7.5e-05\n",
      "Global_step=51306: valid cost=0.00417318; p=1.38143, r=0.563478, f1=0.400228 \n",
      "===>_lr=5.625e-05\n",
      "Global_step=57306: valid cost=0.00412471; p=1.38439, r=0.564188, f1=0.400834 \n",
      "===>_lr=4.21875e-05\n",
      "Globel_step=60409. Training cost=0.00350541; Valid cost=0.00409828; p=1.38915, r=0.566374, f1=0.402336; Speed=4837.94 s/epoch\n",
      "('the save path is ', '../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-7')\n",
      "EPOCH 2， lr=3.16406e-05\n",
      "Global_step=66409: valid cost=0.00410749; p=1.38773, r=0.565415, f1=0.401734 \n",
      "===>_lr=2.37305e-05\n",
      "Global_step=72409: valid cost=0.00409201; p=1.38973, r=0.566048, f1=0.402221 \n",
      "===>_lr=1.77979e-05\n",
      "Globel_step=75512. Training cost=0.00340655; Valid cost=0.00408039; p=1.38975, r=0.566194, f1=0.402296; Speed=4882.3 s/epoch\n",
      "EPOCH 3， lr=1.33484e-05\n",
      "Global_step=81512: valid cost=0.0040821; p=1.38996, r=0.566339, f1=0.402387 \n",
      "('the save path is ', '../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-8')\n",
      "===>_lr=1.00113e-05\n",
      "Global_step=87512: valid cost=0.00408063; p=1.39045, r=0.566472, f1=0.402495 \n",
      "('the save path is ', '../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-9')\n",
      "===>_lr=7.50847e-06\n",
      "Globel_step=90615. Training cost=0.00336332; Valid cost=0.00408246; p=1.39045, r=0.566326, f1=0.402422; Speed=4981.15 s/epoch\n",
      "Finished training, valid_k=0\n",
      "MODEL 1/2\n",
      "***Begin training, valid_k=1, [966650,1933300]\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross1.ckpt-6\n",
      "Finished loading model.\n",
      "EPOCH 1， lr=7.5e-05\n",
      "Global_step=51306: valid cost=0.0042038; p=1.38152, r=0.562691, f1=0.399837 \n",
      "===>_lr=5.625e-05\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-5-aab039bd0c37>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mvalid_k\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mxrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mK\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      2\u001b[0m     \u001b[0;32mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'MODEL %d/%d'\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mvalid_k\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mK\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m     \u001b[0madd_train\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalid_k\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m<ipython-input-4-1d40d67421d6>\u001b[0m in \u001b[0;36madd_train\u001b[0;34m(valid_k, add_epoch, pre_model_num)\u001b[0m\n\u001b[1;32m     72\u001b[0m                              batch_size:tr_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n\u001b[1;32m     73\u001b[0m                 \u001b[0mfetches\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mmerged\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcost\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 74\u001b[0;31m                 \u001b[0msummary\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_cost\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfetches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     75\u001b[0m                 \u001b[0mtest_writer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_summary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msummary\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msummary_step\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     76\u001b[0m         \u001b[0mvalid_cost\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprecision\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecall\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalid_epoch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m  \u001b[0;31m# #每个 epoch 结束后进行一次整体 valid\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/common/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m    787\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    788\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 789\u001b[0;31m                          run_metadata_ptr)\n\u001b[0m\u001b[1;32m    790\u001b[0m       \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    791\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/common/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m    995\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    996\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m--> 997\u001b[0;31m                              feed_dict_string, options, run_metadata)\n\u001b[0m\u001b[1;32m    998\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    999\u001b[0m       \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/common/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1130\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1131\u001b[0m       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,\n\u001b[0;32m-> 1132\u001b[0;31m                            target_list, options, run_metadata)\n\u001b[0m\u001b[1;32m   1133\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1134\u001b[0m       return self._do_call(_prun_fn, self._session, handle, feed_dict,\n",
      "\u001b[0;32m/home/common/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m   1137\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1138\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1139\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1140\u001b[0m     \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1141\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/common/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m   1119\u001b[0m         return tf_session.TF_Run(session, options,\n\u001b[1;32m   1120\u001b[0m                                  \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1121\u001b[0;31m                                  status, run_metadata)\n\u001b[0m\u001b[1;32m   1122\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1123\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msession\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "for valid_k in xrange(K):\n",
    "    print('MODEL %d/%d' % (valid_k,K-1))\n",
    "    add_train(valid_k)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 本地测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false,
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "***Local Predicting 0/3\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-6\n",
      "local valid p=1.38878, r=0.565999, f1=0.402116; speed=38.8857 s/epoch\n",
      "Writed the scores into ../local_scores/textcnn-fc-drop-title-content-256-345-cross3cross0.npy, time 39.674 s\n",
      "Finished cross0, costed time 54.488 s\n",
      "***Local Predicting 1/3\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross1.ckpt-6\n",
      "local valid p=1.38853, r=0.56587, f1=0.40203; speed=41.8943 s/epoch\n",
      "Writed the scores into ../local_scores/textcnn-fc-drop-title-content-256-345-cross3cross1.npy, time 49.899 s\n",
      "Finished cross1, costed time 63.6007 s\n",
      "***Local Predicting 2/3\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross2.ckpt-6\n",
      "local valid p=1.38803, r=0.56593, f1=0.402018; speed=37.5196 s/epoch\n",
      "Writed the scores into ../local_scores/textcnn-fc-drop-title-content-256-345-cross3cross2.npy, time 38.2349 s\n",
      "Finished cross2, costed time 38.8201 s\n",
      "**Finished ALL.\n"
     ]
    }
   ],
   "source": [
    "sys.path.append('..')\n",
    "from evaluator import score_eval\n",
    "\n",
    "# X_valid = np.load('../data/X_valid.npy')\n",
    "# marked_labels_list = np.load('../data/marked_labels_list.npy')\n",
    "saver = tf.train.Saver()\n",
    "\n",
    "\n",
    "def local_predict(best_model, local_scores_path):\n",
    "    \"\"\"预测  valid 结果，并保存预测概率 到  scores.csv 文件中。\"\"\"\n",
    "    saver.restore(sess, best_model)\n",
    "    time0 = time.time()\n",
    "    X_te = X_valid\n",
    "    n_sample = X_te.shape[0]  # 测试样本数量\n",
    "    _batch_size = 256\n",
    "    fetches = [y_pred]   \n",
    "    predict_labels_list = list()  # 所有的预测结果\n",
    "    predict_scores = list()\n",
    "    for i in xrange(0, n_sample, _batch_size):\n",
    "        start = i\n",
    "        end = start+_batch_size\n",
    "        if end > n_sample:\n",
    "            end = n_sample\n",
    "            _batch_size = end - start\n",
    "        X_batch = X_te[start:end]\n",
    "        X1_batch = X_batch[:, :n_step1]\n",
    "        X2_batch = X_batch[:, n_step1:]\n",
    "        feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch, lr:1e-5,\n",
    "                     batch_size:_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "        predict_labels = sess.run(fetches, feed_dict)[0]\n",
    "        predict_scores.append(predict_labels)\n",
    "        predict_labels = map(lambda label: label.argsort()[-1:-6:-1], predict_labels) # 取最大的5个下标\n",
    "        predict_labels_list.extend(predict_labels)\n",
    "    predict_scores = np.asarray(predict_scores)\n",
    "    predict_label_and_marked_label_list = zip(predict_labels_list, marked_labels_list)\n",
    "    precision, recall, f1 = score_eval(predict_label_and_marked_label_list)\n",
    "    print('local valid p=%g, r=%g, f1=%g; speed=%g s/epoch' % ( precision, recall, f1, time.time()-time0) )\n",
    "    np.save(local_scores_path, predict_scores)\n",
    "    print('Writed the scores into %s, time %g s' % (local_scores_path, time.time()-time0))\n",
    "    \n",
    "best_epochs = [6,6,6]    # 每次 cross 训练中最好的迭代轮次\n",
    "K = 3                    # 一共有三个模型\n",
    "for k in xrange(K):\n",
    "    time0 = time.time()\n",
    "    print('***Local Predicting %d/%d' % (k,K))\n",
    "    best_model = '../ckpt/' + model_name + '/cross' + str(k) +'.ckpt'+ '-' + str(best_epochs[k])  # 导入最优模型\n",
    "    local_scores_path = '../local_scores/' + model_name + 'cross' + str(k) + '.npy'\n",
    "    local_predict(best_model, local_scores_path)\n",
    "    print('Finished cross%d, costed time %g s' % (k, time.time()-time0))\n",
    "print('**Finished ALL.')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 对测试数据进行预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# 导入保存好的模型\n",
    "from tqdm import tqdm\n",
    "saver = tf.train.Saver()\n",
    "\n",
    "# 导入测试数据\n",
    "def predict(cross_num):\n",
    "    \"\"\"预测测试集结果，并保存到  result.csv 文件中。\"\"\"\n",
    "    saver.restore(sess, '../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross'+str(cross_num)+'.ckpt-6')\n",
    "    scores_path = '/home2/huangyongye/zhihu/scores/textcnn-fc-drop-title-content-256-345-cross3cross'+str(cross_num)+'.npy'\n",
    "    X1_te = np.load('../data/X_te_title_50.npy')\n",
    "    X2_te = np.load('../data/X_te_content_150.npy')\n",
    "    X_te = np.hstack([X1_te, X2_te])\n",
    "    n_sample = X_te.shape[0]  # 测试样本数量\n",
    "    _batch_size = 500\n",
    "    fetches = [y_pred]   \n",
    "    predict_labels_list = list()  # 所有的预测结果\n",
    "    predict_scores = list()\n",
    "    for i in tqdm(xrange(0, n_sample, _batch_size)):\n",
    "        start = i\n",
    "        end = start+_batch_size\n",
    "        if end > n_sample:\n",
    "            end = n_sample\n",
    "            _batch_size = end - start\n",
    "        X_batch = X_te[start:end]\n",
    "        X1_batch = X_batch[:, :n_step1]\n",
    "        X2_batch = X_batch[:, n_step1:]\n",
    "        feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch, lr:1e-5,\n",
    "                     batch_size:_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "        predict_labels = sess.run(fetches, feed_dict)[0]\n",
    "        predict_scores.append(predict_labels)\n",
    "    predict_scores = np.asarray(predict_scores)\n",
    "    predict_scores = np.vstack(predict_scores)\n",
    "    np.save(scores_path, predict_scores)\n",
    "    print('Finished saving the scores!predict_scores.shape=',predict_scores.shape)    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross0.ckpt-6\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 435/435 [00:19<00:00, 22.79it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('Finished saving the scores!predict_scores.shape=', (217360, 1999))\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross1.ckpt-6\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 435/435 [00:18<00:00, 24.23it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('Finished saving the scores!predict_scores.shape=', (217360, 1999))\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/textcnn-fc-drop-title-content-256-345-cross3/cross2.ckpt-6\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 435/435 [00:18<00:00, 24.04it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('Finished saving the scores!predict_scores.shape=', (217360, 1999))\n"
     ]
    }
   ],
   "source": [
    "cross_nums = [0,1,2]    # 每次 cross 训练中最好的迭代轮次\n",
    "for cross_num in cross_nums:\n",
    "    predict(cross_num)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 在全部预测正确的情况下，理论值为：f1=0.713933\n",
    "precision=2.50273, recall=0.998873, f1=0.713933"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "在全部预测正确的情况下，理论值为：\n",
      "precision=2.50273, recall=0.998873, f1=0.713933\n"
     ]
    }
   ],
   "source": [
    "# 假设全部正确，f1 值最高能到多少\n",
    "def padding_label(labels):\n",
    "    \"\"\"把所有的label补齐到长度为 5\"\"\"\n",
    "    label_len = len(labels)\n",
    "    if label_len >= 5:\n",
    "        return labels[:5]\n",
    "    return np.hstack([labels, np.zeros(5-label_len, dtype=int) - 1])\n",
    "    \n",
    "\n",
    "marked_labels_list = data_valid.y.tolist() # 所有的标注结果\n",
    "predict_labels_list = map(padding_label, marked_labels_list)\n",
    "predict_label_and_marked_label_list = zip(predict_labels_list, marked_labels_list)\n",
    "precision, recall, f1 = score_eval(predict_label_and_marked_label_list)\n",
    "print '在全部预测正确的情况下，理论值为：'\n",
    "print 'precision=%g, recall=%g, f1=%g' % (precision, recall, f1)"
   ]
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python [conda root]",
   "language": "python",
   "name": "conda-root-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
