{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "0812更新：\n",
    "\n",
    "combine-model： 把词向量和字向量信息全部丢进来训练。\n",
    "\n",
    "一共有4路输入，wd 和 ch 各两路。\n",
    "\n",
    "wd结构和 p3-3 一样，对 title 和 content 的输出求 max。ch的结构类似。\n",
    "\n",
    "对 title 和 content 的输出，不是拼起来，而是每一位再做一次 **max.**\n",
    "\n",
    "先用 256 个核与 [3457], 如果好的话可以再加大模型，加大窗口。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false,
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Starting ...\n",
      "Prepared, costed time 0.413691 s.\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt \n",
    "from gensim.models import KeyedVectors\n",
    "import pickle\n",
    "import os\n",
    "import sys\n",
    "import shutil\n",
    "import time\n",
    "\n",
    "\n",
    "time0 = time.time()\n",
    "print('Starting ...')\n",
    "model_name = 'c1-1-cnn-max-256-23457'                    # 模型名称\n",
    "wd_embedding = np.load('../data/W_embedding.npy').astype(np.float32)                      # 导入预训练好的词向量\n",
    "ch_embedding = np.load('/home1/huangyongye/mygithub/zhihu/data/ch_W_embedding.npy').astype(np.float32)  # 字向量\n",
    "model_path = '../ckpt/' + model_name + '/'                  # 模型保存位置\n",
    "summary_path = '../summary/' + model_name + '/'             # summary 位置\n",
    "result_path = '../result/' + model_name + '.csv'            # result.csv 位置\n",
    "scores_path = '../scores/' + model_name + '.npy'            # scores.npy 位置\n",
    "local_scores_path = '../local_scores/' + model_name + '.npy'\n",
    "\n",
    "\n",
    "if not os.path.exists(model_path):\n",
    "    os.makedirs(model_path)         \n",
    "model_path = model_path + 'model.ckpt'\n",
    "if os.path.exists(summary_path):   # 删除原来的 summary 文件，避免重合\n",
    "    print('removed the existing summary files.')\n",
    "    shutil.rmtree(summary_path)\n",
    "os.makedirs(summary_path)          # 然后再次创建\n",
    "    \n",
    "# ##################### config ######################\n",
    "n_step1 = wd_title_len = 50                   # title句子长度\n",
    "n_step2 = wd_content_len = 150                # content 长度\n",
    "n_step3 = ch_title_len = 52\n",
    "n_step4 = ch_content_len = 300\n",
    "input_size = embedding_size = 256       # 字向量长度\n",
    "n_class = 1999                          # 类别总数\n",
    "filter_sizes = [2,3,4,5,7]                  # 卷积核大小\n",
    "n_filter = 256                          # 每种卷积核的个数\n",
    "fc_hidden_size = 1024                   # fc 层节点数\n",
    "n_filter_total = n_filter * len(filter_sizes)\n",
    "print('Prepared, costed time %g s.' % (time.time() - time0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Building model ...\n",
      "Finished creating the TextCNN model.\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "config = tf.ConfigProto()\n",
    "config.gpu_options.allow_growth = True\n",
    "sess = tf.Session(config=config)\n",
    "from tensorflow.contrib import rnn\n",
    "import tensorflow.contrib.layers as layers\n",
    "\n",
    "'''\n",
    "双端 GRU，知乎问题多标签分类。\n",
    "'''\n",
    "print('Building model ...')\n",
    "keep_prob = tf.placeholder(tf.float32, [])\n",
    "lr = tf.placeholder(tf.float32)\n",
    "batch_size = tf.placeholder(tf.int32, [])  # 注意类型必须为 tf.int32\n",
    "tst = tf.placeholder(tf.bool)\n",
    "n_updates = tf.placeholder(tf.int32)      # training iteration,传入 bn 层\n",
    "update_emas = list()                       # BN 层中所有的更新操作\n",
    "\n",
    "\n",
    "def weight_variable(shape, name, initializer=None):\n",
    "    \"\"\"Create a weight variable with appropriate initialization.\"\"\"\n",
    "    if initializer is None:\n",
    "        initializer = tf.contrib.layers.xavier_initializer()\n",
    "    return tf.get_variable(name=name, shape=shape, initializer=initializer, dtype=tf.float32)\n",
    "\n",
    "def bias_variable(shape, name):\n",
    "    \"\"\"Create a bias variable with appropriate initialization.\"\"\"\n",
    "    initial = tf.constant(0.1, shape=shape)\n",
    "    return tf.get_variable(name=name, initializer=initial, dtype=tf.float32)\n",
    "\n",
    "def batchnorm(Ylogits, is_test, num_updates, offset, convolutional=False):\n",
    "    \"\"\"batchnormalization.\n",
    "    Args:\n",
    "        Ylogits: 1D向量或者是3D的卷积结果。\n",
    "        num_updates: 迭代的global_step\n",
    "        offset：表示beta，全局均值；在 RELU 激活中一般初始化为 0.1。\n",
    "        scale：表示lambda，全局方差；在 sigmoid 激活中需要，这 RELU 激活中作用不大。\n",
    "        m: 表示batch均值；v:表示batch方差。\n",
    "        bnepsilon：一个很小的浮点数，防止除以 0.\n",
    "    Returns:\n",
    "        Ybn: 和 Ylogits 的维度一样，就是经过 Batch Normalization 处理的结果。\n",
    "        update_moving_everages：更新mean和variance，主要是给最后的 test 使用。\n",
    "    \"\"\"\n",
    "    exp_moving_avg = tf.train.ExponentialMovingAverage(0.999, num_updates) # adding the iteration prevents from averaging across non-existing iterations\n",
    "    bnepsilon = 1e-5\n",
    "    if convolutional:\n",
    "        mean, variance = tf.nn.moments(Ylogits, [0, 1, 2])\n",
    "    else:\n",
    "        mean, variance = tf.nn.moments(Ylogits, [0])\n",
    "    update_moving_everages = exp_moving_avg.apply([mean, variance])\n",
    "    m = tf.cond(is_test, lambda: exp_moving_avg.average(mean), lambda: mean)\n",
    "    v = tf.cond(is_test, lambda: exp_moving_avg.average(variance), lambda: variance)\n",
    "    Ybn = tf.nn.batch_normalization(Ylogits, m, v, offset, None, bnepsilon)\n",
    "    return Ybn, update_moving_everages\n",
    "\n",
    "with tf.device('/gpu:0'):\n",
    "    with tf.variable_scope('embedding') as vs:\n",
    "        word_embedding = tf.get_variable(name=\"word_embedding\", shape=wd_embedding.shape, \n",
    "                            initializer=tf.constant_initializer(wd_embedding), trainable=True)   # fine-tune\n",
    "        char_embedding = tf.get_variable(name=\"char_embedding\", shape=ch_embedding.shape, \n",
    "                            initializer=tf.constant_initializer(ch_embedding), trainable=True)   # fine-tune\n",
    "        \n",
    "with tf.name_scope('Inputs'):\n",
    "    X1_inputs = tf.placeholder(tf.int64, [None, n_step1], name='X1_input')  # wd_title\n",
    "    X2_inputs = tf.placeholder(tf.int64, [None, n_step2], name='X2_input')  # wd_content\n",
    "    X3_inputs = tf.placeholder(tf.int64, [None, n_step3], name='X3_input')  # ch_title\n",
    "    X4_inputs = tf.placeholder(tf.int64, [None, n_step4], name='X4_input')  # ch_content\n",
    "    y_inputs = tf.placeholder(tf.float32, [None, n_class], name='y_input')    \n",
    "    inputs1 = tf.nn.embedding_lookup(word_embedding, X1_inputs)\n",
    "    inputs2 = tf.nn.embedding_lookup(word_embedding, X2_inputs)\n",
    "    inputs3 = tf.nn.embedding_lookup(char_embedding, X3_inputs)\n",
    "    inputs4 = tf.nn.embedding_lookup(char_embedding, X4_inputs)\n",
    "\n",
    "\n",
    "def textcnn(inputs, n_step):\n",
    "    \"\"\"build the TextCNN network. Return the h_drop\"\"\"\n",
    "    # inputs.shape = [batchsize, n_step, embedding_size]\n",
    "    inputs = tf.expand_dims(inputs, -1)\n",
    "    pooled_outputs = list()\n",
    "    for i, filter_size in enumerate(filter_sizes):\n",
    "        with tf.variable_scope(\"conv-maxpool-%s\" % filter_size):\n",
    "            # Convolution Layer\n",
    "            filter_shape = [filter_size, embedding_size, 1, n_filter]\n",
    "            W_filter = weight_variable(shape=filter_shape, name='W_filter')\n",
    "            beta = bias_variable(shape=[n_filter], name='beta_filter')\n",
    "            tf.summary.histogram('beta_filter', beta)\n",
    "            conv = tf.nn.conv2d(inputs, W_filter, strides=[1, 1, 1, 1], padding=\"VALID\", name=\"conv\")\n",
    "            conv_bn, update_ema = batchnorm(conv, tst, n_updates, beta, convolutional=True)    # 在激活层前面加 BN\n",
    "            # Apply nonlinearity, batch norm scaling is not useful with relus\n",
    "            # batch norm offsets are used instead of biases,使用 BN 层的 offset，不要 biases\n",
    "            h = tf.nn.relu(conv_bn, name=\"filter_relu\")\n",
    "            # Maxpooling over the outputs\n",
    "            pooled = tf.nn.max_pool(h,ksize=[1, n_step - filter_size + 1, 1, 1],\n",
    "                strides=[1, 1, 1, 1],padding='VALID',name=\"pool\")\n",
    "            pooled_outputs.append(pooled)\n",
    "            update_emas.append(update_ema)\n",
    "    h_pool = tf.concat(pooled_outputs, 3)\n",
    "    h_pool_flat = tf.reshape(h_pool, [-1, n_filter_total]) \n",
    "    return h_pool_flat    # shape = [-1, n_filter_total]\n",
    "\n",
    "\n",
    "with tf.variable_scope('wd-encoder'):    \n",
    "    with tf.variable_scope('wd-title'):\n",
    "        wd_output_title = textcnn(inputs1, n_step1)\n",
    "        wd_output_title = tf.expand_dims(wd_output_title, 0)\n",
    "    with tf.variable_scope('wd-content'):\n",
    "        wd_output_content = textcnn(inputs2, n_step2)\n",
    "        wd_output_content = tf.expand_dims(wd_output_content, 0)\n",
    "    wd_output = tf.concat([wd_output_title, wd_output_content], axis=0)\n",
    "    wd_output = tf.reduce_max(wd_output, axis=0)    # shape=[batch_size, n_filter_total]\n",
    "    \n",
    "with tf.variable_scope('ch-encoder'):\n",
    "    with tf.variable_scope('ch-title'):\n",
    "        ch_output_title = textcnn(inputs3, n_step3)\n",
    "        ch_output_title = tf.expand_dims(ch_output_title, 0)\n",
    "    with tf.variable_scope('ch-content'):\n",
    "        ch_output_content = textcnn(inputs4, n_step4)\n",
    "        ch_output_content = tf.expand_dims(ch_output_content, 0)\n",
    "    ch_output = tf.concat([ch_output_title, ch_output_content], axis=0)\n",
    "    ch_output = tf.reduce_max(ch_output, axis=0)    # shape=[batch_size, n_filter_total]\n",
    "    \n",
    "    \n",
    "with tf.variable_scope('fc-bn-layer'):\n",
    "    output = tf.concat([wd_output, ch_output], axis=1)  # shape=[batch_size, n_filter_total*2]\n",
    "    W_fc = weight_variable([n_filter_total*2, fc_hidden_size], name='Weight_fc')\n",
    "    tf.summary.histogram('W_fc', W_fc)\n",
    "    h_fc = tf.matmul(output, W_fc, name='h_fc')\n",
    "    beta_fc = bias_variable([fc_hidden_size], name=\"beta_fc\")\n",
    "    tf.summary.histogram('beta_fc', beta_fc)\n",
    "    fc_bn, update_ema_fc = batchnorm(h_fc, tst, n_updates, beta_fc, convolutional=False)\n",
    "    update_emas.append(update_ema_fc)\n",
    "    fc_bn_relu = tf.nn.relu(fc_bn, name=\"relu\")\n",
    "    fc_bn_drop = tf.nn.dropout(fc_bn_relu, keep_prob, name=\"fc_dropout\")\n",
    "\n",
    "with tf.variable_scope('out_layer'):\n",
    "    W_out = weight_variable(shape=[fc_hidden_size, n_class], name='Weight_out')\n",
    "    tf.summary.histogram('Weight_out', W_out)\n",
    "    b_out = bias_variable([n_class], name='bias_out') \n",
    "    tf.summary.histogram('bias_out', b_out)\n",
    "    y_pred = tf.nn.xw_plus_b(fc_bn_drop, W_out, b_out, name='y_pred')  #每个类别的分数 scores\n",
    "    \n",
    "with tf.name_scope('cost'):\n",
    "    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_pred, labels=y_inputs))\n",
    "    tf.summary.scalar('cost', cost)\n",
    "\n",
    "#  ------------- 优化器设置 ---------------------\n",
    "with tf.name_scope('AdamOptimizer'):\n",
    "    optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n",
    "    train_op = optimizer.minimize(cost)\n",
    "    update_op = tf.group(*update_emas)   # 更新 BN 参数\n",
    "\n",
    "# summary\n",
    "merged = tf.summary.merge_all() # summary\n",
    "train_writer = tf.summary.FileWriter(summary_path + 'train', sess.graph)\n",
    "test_writer = tf.summary.FileWriter(summary_path + 'test')\n",
    "print 'Finished creating the TextCNN model.'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "n_tr_batch=22656\n",
      "n_va_batches=782\n",
      "Every thing prepared!\n"
     ]
    }
   ],
   "source": [
    "sys.path.append('..')\n",
    "from data_helpers import BatchGenerator\n",
    "from data_helpers import to_categorical\n",
    "from evaluator import score_eval\n",
    "\n",
    "\n",
    "wd_train_path = '/home/huangyongye/zhihu_data/data_train/'\n",
    "wd_valid_path = '/home/huangyongye/zhihu_data/data_valid/'\n",
    "ch_train_path = '../ch-data/data_train/'\n",
    "ch_valid_path = '../ch-data/data_valid/'\n",
    "\n",
    "\n",
    "tr_batches = os.listdir(wd_train_path)   # batch 文件名列表\n",
    "va_batches = os.listdir(wd_valid_path)\n",
    "n_tr_batches = len(tr_batches)\n",
    "n_va_batches = len(va_batches)\n",
    "\n",
    "# 测试用\n",
    "# n_tr_batches = 500  \n",
    "# n_va_batches = 50\n",
    "print('n_tr_batch=%d' % n_tr_batches)\n",
    "print('n_va_batches=%d' % n_va_batches)\n",
    "\n",
    "\n",
    "def get_batch(batch_id, mode='train'):\n",
    "    \"\"\"get a batch from data_path.\n",
    "    model: 'train', 'valid' 三种方式。\n",
    "    \"\"\"\n",
    "    if mode == 'train':\n",
    "        wd_path = wd_train_path\n",
    "        ch_path = ch_train_path \n",
    "    if mode == 'valid':\n",
    "        wd_path = wd_valid_path\n",
    "        ch_path = ch_valid_path\n",
    "    wd_batch = np.load(wd_path + str(batch_id) + '.npz')\n",
    "    wd_X_batch = wd_batch['X']\n",
    "    X1_batch = wd_X_batch[:, :n_step1]\n",
    "    X2_batch = wd_X_batch[:, n_step1:]\n",
    "    ch_batch = np.load(ch_path + str(batch_id) + '.npz')\n",
    "    ch_X_batch = ch_batch['X']\n",
    "    X3_batch = ch_X_batch[:, :n_step3]\n",
    "    X4_batch = ch_X_batch[:, n_step3:]\n",
    "    y_batch = ch_batch['y']\n",
    "    return [X1_batch, X2_batch, X3_batch, X4_batch, y_batch]\n",
    "\n",
    "\n",
    "def valid_epoch():\n",
    "    \"\"\"Test on the valid data.\"\"\"\n",
    "    _costs = 0.0\n",
    "    predict_labels_list = list()  # 所有的预测结果\n",
    "    marked_labels_list = list()   # 真实标签\n",
    "    for i in xrange(n_va_batches):\n",
    "        [X1_batch, X2_batch, X3_batch, X4_batch, y_batch] = get_batch(i, mode='valid')\n",
    "        marked_labels_list.extend(y_batch)\n",
    "        y_batch = to_categorical(y_batch)\n",
    "        _batch_size = len(y_batch)\n",
    "        fetches = [merged, cost, y_pred]  \n",
    "        feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch, X3_inputs:X3_batch, X4_inputs:X4_batch, y_inputs:y_batch, \n",
    "                     batch_size:_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "        summary, _cost, predict_labels = sess.run(fetches, feed_dict)\n",
    "        _costs += _cost\n",
    "        predict_labels = map(lambda label: label.argsort()[-1:-6:-1], predict_labels) # 取最大的5个下标\n",
    "        predict_labels_list.extend(predict_labels)\n",
    "    predict_label_and_marked_label_list = zip(predict_labels_list, marked_labels_list)\n",
    "    precision, recall, f1 = score_eval(predict_label_and_marked_label_list)\n",
    "    mean_cost = _costs / n_va_batches\n",
    "    return mean_cost, precision, recall, f1\n",
    "\n",
    "print('Every thing prepared!')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##  模型训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "之前的训练速度 9334.15 s/epoch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tr_batch_num=22656\n",
      "OK!\n"
     ]
    }
   ],
   "source": [
    "_lr = 5e-4 \n",
    "decay = 0.65 \n",
    "# 正式\n",
    "max_epoch = 1            # 超多 max_epoch 以后开始降低 lr\n",
    "max_max_epoch = 9       # 最多迭代的次数\n",
    "valid_step = 10000        # 每 valid_step 就进行一次 valid 运算\n",
    "\n",
    "# 测试\n",
    "# max_epoch = 1            # 超多 max_epoch 以后开始降低 lr\n",
    "# max_max_epoch = 3       # 最多迭代的次数\n",
    "# decay_step = 500\n",
    "# valid_step = 250\n",
    "\n",
    "print('tr_batch_num=%d' % n_tr_batches)\n",
    "saver = tf.train.Saver(max_to_keep=3)           # 最多保存的模型数量\n",
    "sess.run(tf.global_variables_initializer())\n",
    "last_f1 = 0.40\n",
    "model_num = 0\n",
    "global_step = 0\n",
    "print('OK!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "EPOCH 1, lr= 0.0005\n",
      "Global_step=10000: valid cost=0.00931932; p=1.08616, r=0.454465, f1=0.320403, time=5506.51 s\n",
      "Global_step=20000: valid cost=0.00977956; p=1.11548, r=0.469489, f1=0.33042, time=5086.92 s\n",
      "EPOCH 2, lr= 0.000325\n",
      "Global_step=30000: valid cost=0.00764539; p=1.25045, r=0.519151, f1=0.366847, time=5321.01 s\n",
      "Global_step=40000: valid cost=0.0077155; p=1.26423, r=0.525042, f1=0.370974, time=4941.39 s\n",
      "EPOCH 3, lr= 0.00021125\n",
      "Global_step=50000: valid cost=0.0054903; p=1.35413, r=0.556408, f1=0.394365, time=5057.13 s\n",
      "Global_step=60000: valid cost=0.00540052; p=1.36132, r=0.559559, f1=0.396558, time=5109.86 s\n",
      "('the save path is ', '../ckpt/c1-1-cnn-max-256-23457/model.ckpt-1')\n",
      "Begin updating embedding.\n",
      "EPOCH 4, lr= 0.000137313\n",
      "Global_step=70000: valid cost=0.00474504; p=1.3984, r=0.572519, f1=0.406212, time=5176.01 s\n",
      "('the save path is ', '../ckpt/c1-1-cnn-max-256-23457/model.ckpt-2')\n",
      "Global_step=80000: valid cost=0.00499403; p=1.39187, r=0.569932, f1=0.404358, time=5105.15 s\n",
      "Global_step=90000: valid cost=0.0048734; p=1.39593, r=0.571138, f1=0.405308, time=5125.43 s\n",
      "EPOCH 5, lr= 8.92531e-05\n",
      "Global_step=100000: valid cost=0.00430686; p=1.41255, r=0.577851, f1=0.41009, time=5109.47 s\n",
      "('the save path is ', '../ckpt/c1-1-cnn-max-256-23457/model.ckpt-3')\n",
      "Global_step=110000: valid cost=0.00447251; p=1.4159, r=0.57889, f1=0.410896, time=5173.49 s\n",
      "('the save path is ', '../ckpt/c1-1-cnn-max-256-23457/model.ckpt-4')\n",
      "EPOCH 6, lr= 5.80145e-05\n",
      "Global_step=120000: valid cost=0.00412425; p=1.42391, r=0.582024, f1=0.413149, time=5126.39 s\n",
      "('the save path is ', '../ckpt/c1-1-cnn-max-256-23457/model.ckpt-5')\n",
      "Global_step=130000: valid cost=0.0041362; p=1.42271, r=0.580972, f1=0.412518, time=5129.63 s\n",
      "EPOCH 7, lr= 3.77094e-05\n",
      "Global_step=140000: valid cost=0.0040443; p=1.42672, r=0.582289, f1=0.413519, time=5123.06 s\n",
      "('the save path is ', '../ckpt/c1-1-cnn-max-256-23457/model.ckpt-6')\n"
     ]
    }
   ],
   "source": [
    "time0 = time.time()\n",
    "\n",
    "for epoch in xrange(max_max_epoch):\n",
    "    batch_indexs = np.random.permutation(n_tr_batches)  # shuffle the training data\n",
    "    if epoch >= max_epoch:\n",
    "        _lr = _lr * decay                 # 降低学习率\n",
    "    if epoch == 3:   # 中间保存一次，一方断电\n",
    "        if model_num == 0:\n",
    "            model_num += 1\n",
    "            save_path = saver.save(sess, model_path, global_step=model_num)\n",
    "            print('the save path is ', save_path)\n",
    "        print('Begin updating embedding.')\n",
    "    print 'EPOCH %d, lr= %g' % (epoch+1, _lr)    \n",
    "    for batch in xrange(n_tr_batches): \n",
    "        global_step += 1\n",
    "        if (global_step+1) % valid_step == 0:    # 进行 valid 计算\n",
    "            valid_cost, precision, recall, f1 = valid_epoch()\n",
    "            print('Global_step=%d: valid cost=%g; p=%g, r=%g, f1=%g, time=%g s' % (\n",
    "                    global_step+1, valid_cost, precision, recall, f1, time.time()-time0))\n",
    "            time0 = time.time()\n",
    "            if (f1 > last_f1):\n",
    "                last_f1 = f1\n",
    "                model_num += 1\n",
    "                save_path = saver.save(sess, model_path, global_step=model_num)\n",
    "                print('the save path is ', save_path) \n",
    "                \n",
    "        batch_id = batch_indexs[batch]\n",
    "        [X1_batch, X2_batch, X3_batch, X4_batch, y_batch] = get_batch(batch_id, mode='train')\n",
    "        y_batch = to_categorical(y_batch)\n",
    "        _batch_size = len(y_batch)\n",
    "        fetches = [merged, cost, train_op, update_op]\n",
    "        feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch, X3_inputs:X3_batch, X4_inputs:X4_batch, y_inputs:y_batch, \n",
    "                     batch_size:_batch_size, lr:_lr, keep_prob:0.5, tst:False, n_updates:global_step}\n",
    "        summary, _cost, _, _ = sess.run(fetches, feed_dict) # the cost is the mean cost of one batch\n",
    "        if global_step % 500:\n",
    "            train_writer.add_summary(summary, global_step)\n",
    "            batch_id = np.random.randint(0, n_va_batches)   # 随机选一个验证batch\n",
    "            [X1_batch, X2_batch, X3_batch, X4_batch, y_batch] = get_batch(batch_id, mode='valid')\n",
    "            y_batch = to_categorical(y_batch)\n",
    "            _batch_size = len(y_batch)\n",
    "            feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch, X3_inputs:X3_batch, X4_inputs:X4_batch, y_inputs:y_batch, \n",
    "                         batch_size:_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "            fetches = [merged, cost]\n",
    "            summary, _cost = sess.run(fetches, feed_dict)\n",
    "            test_writer.add_summary(summary, global_step)\n",
    "\n",
    "valid_cost, precision, recall, f1 = valid_epoch()  # # 每个 epoch 进行一次验证 valid\n",
    "print('Global_step=%d;  valid cost=%g; p=%g, r=%g, f1=%g; speed=%g s/epoch' % (\n",
    "    global_step+1, valid_cost, precision, recall, f1, time.time()-time0) )\n",
    "if (f1 > last_f1):\n",
    "    model_num += 1\n",
    "    save_path = saver.save(sess, model_path, global_step=model_num)\n",
    "    print('the save path is ', save_path) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Global_step=172135;  valid cost=0.00400627; p=1.42615, r=0.582101, f1=0.413376; speed=1391.4 s/epoch\n"
     ]
    }
   ],
   "source": [
    "valid_cost, precision, recall, f1 = valid_epoch()  # # 每个 epoch 进行一次验证 valid\n",
    "print('Global_step=%d;  valid cost=%g; p=%g, r=%g, f1=%g; speed=%g s/epoch' % (\n",
    "    global_step+1, valid_cost, precision, recall, f1, time.time()-time0) )\n",
    "if (f1 > last_f1):\n",
    "    model_num += 1\n",
    "    save_path = saver.save(sess, model_path, global_step=model_num)\n",
    "    print('the save path is ', save_path) "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- Bi-LSTM 模型<br/>\n",
    "**batch_size=256，迭代12个epoch，基本收敛。结果： 验证集 f1=0.38618； 提交结果 0.3873186**\n",
    "- Bi-GRU 模型<br/>\n",
    "**batch_size=256，迭代15个epoch。大概在 13 个 epoch 就收敛了。结果： 验证集 f1=0.390534； 提交结果 0.39198**\n",
    "- attention-Bi-GRU 模型<br/>\n",
    "**batch_size=256，迭代18个epoch。在16个epoch收敛。结果：验证集 f1=f1=0.391734 ；提交结果 0.39310**\n",
    "\n",
    "\n",
    "- textcnn-256 lr=1e-3, decay=0.65, dropout=0.5, 迭代6次基本收敛， f1=0.388\n",
    "- textcnn-256-bn lr=1.5E-3，decay=0.65, dropout=0.5, 6次基本收敛，8次好一点， f1=0.389\n",
    "- textcnn-fc-drop 迭代 4.5 个epoch 收敛。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 本地测试\n",
    "使用 seed13 的前 10万条数据."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 1/782 [00:00<02:30,  5.20it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "local predicting ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 782/782 [01:19<00:00, 11.93it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "local valid p=1.42715, r=0.582772, f1=0.413798; speed=79.8017 s/epoch\n",
      "Writed the scores into ../local_scores/c1-1-cnn-max-256-23457.npy, time 80.3177 s\n"
     ]
    }
   ],
   "source": [
    "# 导入保存好的模型\n",
    "# saver = tf.train.Saver()\n",
    "# best_model_path = model_path + '-' + str(8)  # 导入最优模型\n",
    "# saver.restore(sess, best_model_path)\n",
    "# print('Finished loading model.')\n",
    "\n",
    "from tqdm import tqdm\n",
    "local_scores_path = '../local_scores/' + model_name + '.npy'\n",
    "\n",
    "# 导入测试数据\n",
    "def local_predict(scores_path=local_scores_path):\n",
    "    \"\"\"预测  valid 结果，并保存预测概率 到  scores.csv 文件中。\"\"\"\n",
    "    print('local predicting ...')\n",
    "    time0 = time.time()\n",
    "    fetches = [y_pred]   \n",
    "    predict_labels_list = list()  # 所有的预测结果\n",
    "    predict_scores = list()\n",
    "    marked_labels_list = list()   # 真实标签\n",
    "    for i in tqdm(xrange(n_va_batches)):\n",
    "        [X1_batch, X2_batch, X3_batch, X4_batch, y_batch] = get_batch(i, mode='valid')\n",
    "        marked_labels_list.extend(y_batch)\n",
    "        y_batch = to_categorical(y_batch)\n",
    "        _batch_size = len(y_batch)\n",
    "        feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch, X3_inputs:X3_batch, X4_inputs:X4_batch, y_inputs:y_batch, \n",
    "                         batch_size:_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "        predict_labels = sess.run(fetches, feed_dict)[0]\n",
    "        predict_scores.append(predict_labels)\n",
    "        predict_labels = map(lambda label: label.argsort()[-1:-6:-1], predict_labels) # 取最大的5个下标\n",
    "        predict_labels_list.extend(predict_labels)\n",
    "    predict_scores = np.asarray(predict_scores)\n",
    "    predict_label_and_marked_label_list = zip(predict_labels_list, marked_labels_list)\n",
    "    precision, recall, f1 = score_eval(predict_label_and_marked_label_list)\n",
    "    print('local valid p=%g, r=%g, f1=%g; speed=%g s/epoch' % ( precision, recall, f1, time.time()-time0) )\n",
    "    np.save(local_scores_path, predict_scores)\n",
    "    print('Writed the scores into %s, time %g s' % (local_scores_path, time.time()-time0))\n",
    "    \n",
    "local_predict()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 取 epoch 平均"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false,
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "local predicting ...\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/c1-1-cnn-max-256-23457/model.ckpt-6\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\r",
      "  0%|          | 0/782 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Finished loading model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 782/782 [01:20<00:00,  9.67it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "local valid p=1.42672, r=0.582289, f1=0.413519; speed=81.3728 s/epoch\n",
      "local predicting ...\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/c1-1-cnn-max-256-23457/model.ckpt-7\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 1/782 [00:00<01:28,  8.84it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Finished loading model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 782/782 [01:19<00:00,  9.85it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "local valid p=1.42631, r=0.582554, f1=0.413618; speed=79.8682 s/epoch\n",
      "local predicting ...\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/c1-1-cnn-max-256-23457/model.ckpt-8\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 1/782 [00:00<01:28,  8.82it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Finished loading model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 782/782 [01:20<00:00, 11.12it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "local valid p=1.42715, r=0.582772, f1=0.413798; speed=80.9862 s/epoch\n",
      "p=1.42997\tr=0.584042\tf1=0.414676;\n",
      "save scores to ../local_scores/c1-1-cnn-max-256-23457-all.npy\n"
     ]
    }
   ],
   "source": [
    "# 导入保存好的模型\n",
    "# saver = tf.train.Saver()\n",
    "# best_model_path = model_path + '-' + str(8)  # 导入最优模型\n",
    "# saver.restore(sess, best_model_path)\n",
    "# print('Finished loading model.')\n",
    "\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Ground-truth topic labels for the local validation set (one list of label ids per sample).\n",
    "marked_labels_list = np.load('../data/marked_labels_list.npy')\n",
    "# Fed as n_updates at inference so time-dependent ops (e.g. moving averages) see a converged step.\n",
    "global_step =100000\n",
    "# # softmax helpers\n",
    "def _softmax(score):\n",
    "    \"\"\"Softmax-normalize the output class scores of a single sample.\n",
    "    score: arr.shape=[1999].\n",
    "    \"\"\"\n",
    "    max_sc = np.max(score)   # subtract the max score for numerical stability\n",
    "    score = score - max_sc\n",
    "    exp_sc = np.exp(score)\n",
    "    sum_exp_sc = np.sum(exp_sc)\n",
    "    softmax_sc = exp_sc / sum_exp_sc\n",
    "    return softmax_sc    # normalized probabilities\n",
    "    \n",
    "def softmax(scores):\n",
    "    \"\"\"Softmax-normalize the output scores of all samples.\n",
    "    scores: arr.shape=[n_sample, 1999].\n",
    "    \"\"\"\n",
    "    softmax_scs = map(_softmax, scores)\n",
    "    return np.asarray(softmax_scs)\n",
    "\n",
    "\n",
    "# Predict on the local validation data\n",
    "def local_predict(epoch, scores_path=local_scores_path):\n",
    "    \"\"\"Run the checkpoint saved at `epoch` over the validation set and return the raw scores.\n",
    "\n",
    "    NOTE(review): `scores_path` is never used inside this function (the np.save\n",
    "    below is commented out and references the global instead); its default also\n",
    "    binds the value `local_scores_path` had at definition time, not the '-all'\n",
    "    path assigned further down -- confirm before relying on it.\n",
    "    \"\"\"\n",
    "    print('local predicting ...')\n",
    "    saver = tf.train.Saver()\n",
    "    best_model_path = model_path + '-' + str(epoch)  # checkpoint for the requested epoch\n",
    "    saver.restore(sess, best_model_path)\n",
    "    print('Finished loading model.')\n",
    "\n",
    "    time0 = time.time()\n",
    "    fetches = [y_pred]   \n",
    "    predict_labels_list = list()  # top-5 predicted label ids per sample\n",
    "    predict_scores = list()\n",
    "    marked_labels_list = list()   # true labels (local; shadows the module-level array inside this function)\n",
    "    for i in tqdm(xrange(n_va_batches)):\n",
    "        [X1_batch, X2_batch, X3_batch, X4_batch, y_batch] = get_batch(i, mode='valid')\n",
    "        marked_labels_list.extend(y_batch)\n",
    "        y_batch = to_categorical(y_batch)\n",
    "        _batch_size = len(y_batch)\n",
    "        feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch, X3_inputs:X3_batch, X4_inputs:X4_batch, y_inputs:y_batch, \n",
    "                         batch_size:_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "        predict_labels = sess.run(fetches, feed_dict)[0]\n",
    "        predict_scores.append(predict_labels)\n",
    "        predict_labels = map(lambda label: label.argsort()[-1:-6:-1], predict_labels) # indices of the 5 largest scores\n",
    "        predict_labels_list.extend(predict_labels)\n",
    "    predict_scores = np.asarray(predict_scores)\n",
    "    predict_label_and_marked_label_list = zip(predict_labels_list, marked_labels_list)\n",
    "    precision, recall, f1 = score_eval(predict_label_and_marked_label_list)\n",
    "    print('local valid p=%g, r=%g, f1=%g; speed=%g s/epoch' % ( precision, recall, f1, time.time()-time0) )\n",
    "#     np.save(local_scores_path, predict_scores)\n",
    "#     print('Writed the scores into %s, time %g s' % (local_scores_path, time.time()-time0))\n",
    "    return predict_scores   # per-batch list of score arrays\n",
    "    \n",
    "# Ensemble several epochs' checkpoints by summing their validation score matrices.\n",
    "local_scores_path = '../local_scores/' + model_name + '-all.npy'\n",
    "sum_scores = np.zeros((len(marked_labels_list), 1999), dtype=float)   \n",
    "last_f1 = f1=0.413798  # best f1 so far; only save scores when the ensemble beats it\n",
    "valid_epoch = [6,7,8]\n",
    "for epoch in valid_epoch:\n",
    "    score =  np.vstack(local_predict(epoch))\n",
    "    sum_scores = sum_scores + score\n",
    "predict_labels_list = map(lambda label: label.argsort()[-1:-6:-1], sum_scores) # indices of the 5 largest scores\n",
    "predict_label_and_marked_label_list = zip(predict_labels_list, marked_labels_list)\n",
    "precision, recall, f1 = score_eval(predict_label_and_marked_label_list)\n",
    "print('p=%g\\tr=%g\\tf1=%g;' % ( precision, recall, f1))\n",
    "if f1 > last_f1:\n",
    "    np.save(local_scores_path, sum_scores)\n",
    "    print('save scores to %s' % local_scores_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 对测试数据进行预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def get_test_batch(batch_id):\n",
    "    \"\"\"Load test batch `batch_id` from disk and split it into the four model inputs.\n",
    "\n",
    "    Returns [X1, X2, X3, X4]: the word-level batch split at column n_step1 and the\n",
    "    char-level batch split at column n_step3 (presumably title vs. content halves --\n",
    "    confirm against the data-dump code). The original docstring mentioning\n",
    "    'train'/'valid' modes was a copy-paste leftover; this function only reads\n",
    "    pre-dumped test batches.\n",
    "    \"\"\"\n",
    "    wd_path = '../wd-data/data_test/'\n",
    "    ch_path = '../ch-data/data_test/'\n",
    "    wd_X_batch = np.load(wd_path + str(batch_id) + '.npy')\n",
    "    X1_batch = wd_X_batch[:, :n_step1]\n",
    "    X2_batch = wd_X_batch[:, n_step1:]\n",
    "    ch_X_batch = np.load(ch_path + str(batch_id) + '.npy')\n",
    "    X3_batch = ch_X_batch[:, :n_step3]\n",
    "    X4_batch = ch_X_batch[:, n_step3:]\n",
    "    return [X1_batch, X2_batch, X3_batch, X4_batch]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "local predicting ...\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/c1-1-cnn-max-256-23457/model.ckpt-6\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 1/1699 [00:00<03:42,  7.65it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Finished loading model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 1699/1699 [02:25<00:00,  7.51it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "local predicting ...\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/c1-1-cnn-max-256-23457/model.ckpt-7\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 2/1699 [00:00<02:41, 10.50it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Finished loading model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 1699/1699 [02:24<00:00, 11.78it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "local predicting ...\n",
      "INFO:tensorflow:Restoring parameters from ../ckpt/c1-1-cnn-max-256-23457/model.ckpt-8\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 2/1699 [00:00<02:31, 11.19it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Finished loading model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 1699/1699 [02:24<00:00, 11.77it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('sum_scores.shape=', (217360, 1999))\n",
      "Finished.\n"
     ]
    }
   ],
   "source": [
    "from tqdm import tqdm\n",
    "n_te_batches = len(os.listdir('../ch-data/data_test/'))  # one .npy file per test batch\n",
    "global_step = 100000\n",
    "\n",
    "# Predict on the test data\n",
    "def predict(epoch):\n",
    "    \"\"\"Run the checkpoint saved at `epoch` over the test set and return the stacked score matrix.\"\"\"\n",
    "    print('local predicting ...')\n",
    "    saver = tf.train.Saver()\n",
    "    best_model_path = model_path + '-' + str(epoch)  # checkpoint for the requested epoch\n",
    "    saver.restore(sess, best_model_path)\n",
    "    print('Finished loading model.')\n",
    "    fetches = [y_pred]   \n",
    "    predict_labels_list = list()  # top-5 predicted label ids (collected but not returned)\n",
    "    predict_scores = list()\n",
    "    for i in tqdm(xrange(n_te_batches)):\n",
    "        [X1_batch, X2_batch, X3_batch, X4_batch] = get_test_batch(i)\n",
    "        _batch_size = len(X1_batch)\n",
    "        feed_dict = {X1_inputs:X1_batch, X2_inputs:X2_batch, X3_inputs:X3_batch, X4_inputs:X4_batch,\n",
    "                     batch_size:_batch_size, keep_prob:1.0, tst:True, n_updates:global_step}\n",
    "        predict_labels = sess.run(fetches, feed_dict)[0]\n",
    "        predict_scores.append(predict_labels)\n",
    "        predict_labels = map(lambda label: label.argsort()[-1:-6:-1], predict_labels) # indices of the 5 largest scores\n",
    "        predict_labels_list.extend(predict_labels)\n",
    "    predict_scores = np.asarray(predict_scores)\n",
    "    return np.vstack(predict_scores)\n",
    "\n",
    "# Ensemble epochs 6-8 by summing their test score matrices.\n",
    "# 217360 is the number of test samples (matches sum_scores.shape printed below).\n",
    "sum_scores = np.zeros((217360, 1999), dtype=float)   \n",
    "valid_epoch = [6,7, 8]\n",
    "for epoch in valid_epoch:\n",
    "    score = predict(epoch)\n",
    "    sum_scores = sum_scores + score\n",
    "print('sum_scores.shape=', sum_scores.shape)\n",
    "np.save('../scores/' + model_name + '-all.npy', sum_scores)\n",
    "print('Finished.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false,
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>question</th>\n",
       "      <th>tid0</th>\n",
       "      <th>tid1</th>\n",
       "      <th>tid2</th>\n",
       "      <th>tid3</th>\n",
       "      <th>tid4</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>6215603645409872328</td>\n",
       "      <td>-7506384235581390893</td>\n",
       "      <td>2919247920214845195</td>\n",
       "      <td>4610596224687453206</td>\n",
       "      <td>-6839713564940654454</td>\n",
       "      <td>-5932391056759866388</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>6649324930261961840</td>\n",
       "      <td>3418451812342379591</td>\n",
       "      <td>-240041917918953337</td>\n",
       "      <td>2858911571784840089</td>\n",
       "      <td>-212983527176510806</td>\n",
       "      <td>3383016985780045156</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>-4251899610700378615</td>\n",
       "      <td>2919247920214845195</td>\n",
       "      <td>-3315241959305847628</td>\n",
       "      <td>-5265476641576484497</td>\n",
       "      <td>2816249700493135244</td>\n",
       "      <td>-7358589937244777363</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>6213817087034420233</td>\n",
       "      <td>-8655945395761165989</td>\n",
       "      <td>-4966205278807386328</td>\n",
       "      <td>5804619920623030604</td>\n",
       "      <td>-5985158251850250708</td>\n",
       "      <td>7476760589625268543</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>-8930652370334418373</td>\n",
       "      <td>3972493657017129406</td>\n",
       "      <td>-8963554618409314978</td>\n",
       "      <td>-1115593437686158905</td>\n",
       "      <td>1870872991887862017</td>\n",
       "      <td>6018641953300645757</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>-2893445476547593888</td>\n",
       "      <td>2131451573312950491</td>\n",
       "      <td>-6206436693745657677</td>\n",
       "      <td>-2696736445927423374</td>\n",
       "      <td>-8655945395761165989</td>\n",
       "      <td>-8274522839089381384</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>2614833994648160978</td>\n",
       "      <td>4482402820945758152</td>\n",
       "      <td>-6748914495015758455</td>\n",
       "      <td>-3856154743789177934</td>\n",
       "      <td>-3904210248050890128</td>\n",
       "      <td>1553849747662134917</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>1572988006266661060</td>\n",
       "      <td>3738968195649774859</td>\n",
       "      <td>-839691564858654120</td>\n",
       "      <td>7739004195693774975</td>\n",
       "      <td>-2627298052801704596</td>\n",
       "      <td>1160326435131345730</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>-3736249911643942320</td>\n",
       "      <td>-7653703019053330516</td>\n",
       "      <td>8690965822342756180</td>\n",
       "      <td>738845194850773558</td>\n",
       "      <td>8382235673310200820</td>\n",
       "      <td>5502528845814007324</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>-976507019126932319</td>\n",
       "      <td>3195914392210930723</td>\n",
       "      <td>3804601920633030746</td>\n",
       "      <td>6940355838132160535</td>\n",
       "      <td>4715442001886462944</td>\n",
       "      <td>6718676536613592056</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>5515590371171945660</td>\n",
       "      <td>4195795391451929480</td>\n",
       "      <td>4351331710881888756</td>\n",
       "      <td>-1689319711084901730</td>\n",
       "      <td>-7358589937244777363</td>\n",
       "      <td>-5265476641576484497</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>6888753259805649782</td>\n",
       "      <td>4931893522763265934</td>\n",
       "      <td>7283655748413496857</td>\n",
       "      <td>-6254806834008898035</td>\n",
       "      <td>-5932391056759866388</td>\n",
       "      <td>-9176307901497282391</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>7873230180977758006</td>\n",
       "      <td>4730384719025183341</td>\n",
       "      <td>-6440461292041887516</td>\n",
       "      <td>-7283993654004755131</td>\n",
       "      <td>-3026760652624195547</td>\n",
       "      <td>-5932391056759866388</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>1805154204046808321</td>\n",
       "      <td>3738968195649774859</td>\n",
       "      <td>-2627298052801704596</td>\n",
       "      <td>-839691564858654120</td>\n",
       "      <td>1160326435131345730</td>\n",
       "      <td>-8932546057542867495</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>5466502219528315935</td>\n",
       "      <td>-3333307402327293632</td>\n",
       "      <td>5732944515355059947</td>\n",
       "      <td>5370870069386720811</td>\n",
       "      <td>2787171473654490487</td>\n",
       "      <td>616649108700697054</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                question                  tid0                  tid1  \\\n",
       "0    6215603645409872328  -7506384235581390893   2919247920214845195   \n",
       "1    6649324930261961840   3418451812342379591   -240041917918953337   \n",
       "2   -4251899610700378615   2919247920214845195  -3315241959305847628   \n",
       "3    6213817087034420233  -8655945395761165989  -4966205278807386328   \n",
       "4   -8930652370334418373   3972493657017129406  -8963554618409314978   \n",
       "5   -2893445476547593888   2131451573312950491  -6206436693745657677   \n",
       "6    2614833994648160978   4482402820945758152  -6748914495015758455   \n",
       "7    1572988006266661060   3738968195649774859   -839691564858654120   \n",
       "8   -3736249911643942320  -7653703019053330516   8690965822342756180   \n",
       "9    -976507019126932319   3195914392210930723   3804601920633030746   \n",
       "10   5515590371171945660   4195795391451929480   4351331710881888756   \n",
       "11   6888753259805649782   4931893522763265934   7283655748413496857   \n",
       "12   7873230180977758006   4730384719025183341  -6440461292041887516   \n",
       "13   1805154204046808321   3738968195649774859  -2627298052801704596   \n",
       "14   5466502219528315935  -3333307402327293632   5732944515355059947   \n",
       "\n",
       "                    tid2                  tid3                  tid4  \n",
       "0    4610596224687453206  -6839713564940654454  -5932391056759866388  \n",
       "1    2858911571784840089   -212983527176510806   3383016985780045156  \n",
       "2   -5265476641576484497   2816249700493135244  -7358589937244777363  \n",
       "3    5804619920623030604  -5985158251850250708   7476760589625268543  \n",
       "4   -1115593437686158905   1870872991887862017   6018641953300645757  \n",
       "5   -2696736445927423374  -8655945395761165989  -8274522839089381384  \n",
       "6   -3856154743789177934  -3904210248050890128   1553849747662134917  \n",
       "7    7739004195693774975  -2627298052801704596   1160326435131345730  \n",
       "8     738845194850773558   8382235673310200820   5502528845814007324  \n",
       "9    6940355838132160535   4715442001886462944   6718676536613592056  \n",
       "10  -1689319711084901730  -7358589937244777363  -5265476641576484497  \n",
       "11  -6254806834008898035  -5932391056759866388  -9176307901497282391  \n",
       "12  -7283993654004755131  -3026760652624195547  -5932391056759866388  \n",
       "13   -839691564858654120   1160326435131345730  -8932546057542867495  \n",
       "14   5370870069386720811   2787171473654490487    616649108700697054  "
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Preview the first 15 rows of the result dataframe (question id + top-5 predicted topic ids).\n",
    "df_result.head(15)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>question</th>\n",
       "      <th>tid0</th>\n",
       "      <th>tid1</th>\n",
       "      <th>tid2</th>\n",
       "      <th>tid3</th>\n",
       "      <th>tid4</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>6215603645409872328</td>\n",
       "      <td>4610596224687453206</td>\n",
       "      <td>-6839713564940654454</td>\n",
       "      <td>-6306904715218704629</td>\n",
       "      <td>2919247920214845195</td>\n",
       "      <td>-8091907016971478715</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>6649324930261961840</td>\n",
       "      <td>3418451812342379591</td>\n",
       "      <td>2858911571784840089</td>\n",
       "      <td>2382911985227044227</td>\n",
       "      <td>-240041917918953337</td>\n",
       "      <td>3383016985780045156</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>-4251899610700378615</td>\n",
       "      <td>2919247920214845195</td>\n",
       "      <td>-7358589937244777363</td>\n",
       "      <td>2816249700493135244</td>\n",
       "      <td>-5265476641576484497</td>\n",
       "      <td>-3315241959305847628</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>6213817087034420233</td>\n",
       "      <td>-8655945395761165989</td>\n",
       "      <td>5804619920623030604</td>\n",
       "      <td>-4966205278807386328</td>\n",
       "      <td>7476760589625268543</td>\n",
       "      <td>-2523521411748733187</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>-8930652370334418373</td>\n",
       "      <td>3972493657017129406</td>\n",
       "      <td>-8963554618409314978</td>\n",
       "      <td>-1115593437686158905</td>\n",
       "      <td>1870872991887862017</td>\n",
       "      <td>6018641953300645757</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "               question                  tid0                  tid1  \\\n",
       "0   6215603645409872328   4610596224687453206  -6839713564940654454   \n",
       "1   6649324930261961840   3418451812342379591   2858911571784840089   \n",
       "2  -4251899610700378615   2919247920214845195  -7358589937244777363   \n",
       "3   6213817087034420233  -8655945395761165989   5804619920623030604   \n",
       "4  -8930652370334418373   3972493657017129406  -8963554618409314978   \n",
       "\n",
       "                   tid2                  tid3                  tid4  \n",
       "0  -6306904715218704629   2919247920214845195  -8091907016971478715  \n",
       "1   2382911985227044227   -240041917918953337   3383016985780045156  \n",
       "2   2816249700493135244  -5265476641576484497  -3315241959305847628  \n",
       "3  -4966205278807386328   7476760589625268543  -2523521411748733187  \n",
       "4  -1115593437686158905   1870872991887862017   6018641953300645757  "
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Reference result (for comparison with the ensemble prediction above)\n",
    "df_result.head(5)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 在全部预测正确的情况下，理论值为：f1=0.713933\n",
    "precision=2.50273, recall=0.998873, f1=0.713933"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "在全部预测正确的情况下，理论值为：\n",
      "precision=2.50273, recall=0.998873, f1=0.713933\n"
     ]
    }
   ],
   "source": [
    "# Upper bound check: if every prediction matched the ground truth, how high could f1 go?\n",
    "def padding_label(labels):\n",
    "    \"\"\"Return `labels` padded with -1 fillers up to length 5 (truncated if longer).\"\"\"\n",
    "    if len(labels) < 5:\n",
    "        filler = np.zeros(5 - len(labels), dtype=int) - 1\n",
    "        return np.hstack([labels, filler])\n",
    "    return labels[:5]\n",
    "\n",
    "marked_labels_list = data_valid.y.tolist() # every annotated label list\n",
    "predict_labels_list = map(padding_label, marked_labels_list)\n",
    "predict_label_and_marked_label_list = zip(predict_labels_list, marked_labels_list)\n",
    "precision, recall, f1 = score_eval(predict_label_and_marked_label_list)\n",
    "print '在全部预测正确的情况下，理论值为：'\n",
    "print 'precision=%g, recall=%g, f1=%g' % (precision, recall, f1)"
   ]
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python [conda root]",
   "language": "python",
   "name": "conda-root-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
