{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "import time\n",
    "import sys\n",
    "import os\n",
    "import datetime\n",
    "\n",
    "from sklearn.metrics import roc_auc_score, precision_recall_fscore_support, accuracy_score\n",
    "import numpy as np\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configuration classes\n",
    "class TrainConfig(object):\n",
    "    \"\"\"Hyper-parameters that control the training loop.\"\"\"\n",
    "    epochs = 15\n",
    "    decay_rate = 0.92\n",
    "    learning_rate = 0.01\n",
    "    evaluate_every = 100\n",
    "    checkpoint_every = 100\n",
    "    max_grad_norm = 3.0\n",
    "\n",
    "\n",
    "class ModelConfig(object):\n",
    "    \"\"\"Hyper-parameters that describe the network architecture.\"\"\"\n",
    "    hidden_layers = [200]\n",
    "    dropout_keep_prob = 0.6\n",
    "\n",
    "\n",
    "class Config(object):\n",
    "    \"\"\"Top-level configuration bundling data, training and model settings.\"\"\"\n",
    "    batch_size = 10\n",
    "    num_skills = 267\n",
    "    input_size = 2 * num_skills  # one-hot halves: wrong answers, then correct answers\n",
    "\n",
    "    trainConfig = TrainConfig()\n",
    "    modelConfig = ModelConfig()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the configuration object used by the rest of the notebook\n",
    "config = Config()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data generation\n",
    "class DataGenerator(object):\n",
    "    \"\"\"Reads the knowledge-tracing CSV and yields padded, one-hot encoded batches.\n",
    "\n",
    "    train_seqs / test_seqs hold one entry per student:\n",
    "    [[skill_id, is_correct], [skill_id, is_correct], ...]\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, fileName, config):\n",
    "        self.fileName = fileName\n",
    "        self.train_seqs = []\n",
    "        self.test_seqs = []\n",
    "        self.infer_seqs = []\n",
    "        self.batch_size = config.batch_size\n",
    "        self.pos = 0\n",
    "        self.end = False\n",
    "        self.num_skills = config.num_skills\n",
    "        self.skills_to_int = {}  # raw skill id -> dense index\n",
    "        self.int_to_skills = {}  # dense index -> raw skill id\n",
    "\n",
    "    def read_file(self):\n",
    "        # Read the CSV (skipping its header row) and return a dict\n",
    "        # {student_id: [[skill_id, is_correct], ...]} plus the list of unique skill ids.\n",
    "        seqs_by_student = {}\n",
    "        skills = []  # every skill id seen; the input vector length is twice the skill count\n",
    "        count = 0\n",
    "        with open(self.fileName, 'r') as f:\n",
    "            for line in f:\n",
    "                count += 1\n",
    "                if count > 1:  # skip the header line\n",
    "                    fields = line.strip().split(\",\")  # [student id, skill id, correctness]\n",
    "                    student, skill, is_correct = int(fields[0]), int(fields[1]), int(fields[2])\n",
    "                    skills.append(skill)  # a question is represented by the skill it belongs to\n",
    "                    seqs_by_student[student] = seqs_by_student.get(student, []) + [[skill, is_correct]]\n",
    "        return seqs_by_student, list(set(skills))\n",
    "\n",
    "    def gen_dict(self, unique_skills):\n",
    "        \"\"\"\n",
    "        Build mapping tables between raw skill ids and dense indices [0, 1, 2, ...].\n",
    "        :param unique_skills: list of unique skill ids\n",
    "        :return: None (fills self.skills_to_int and self.int_to_skills)\n",
    "        \"\"\"\n",
    "        sorted_skills = sorted(unique_skills)\n",
    "        skills_to_int = {}\n",
    "        int_to_skills = {}\n",
    "        for i in range(len(sorted_skills)):\n",
    "            skills_to_int[sorted_skills[i]] = i\n",
    "            int_to_skills[i] = sorted_skills[i]\n",
    "\n",
    "        self.skills_to_int = skills_to_int\n",
    "        self.int_to_skills = int_to_skills\n",
    "\n",
    "    def split_dataset(self, seqs_by_student, sample_rate=0.2, random_seed=1):\n",
    "        # Split the students into a train set and a test set.\n",
    "        sorted_keys = sorted(seqs_by_student.keys())  # sort ids so the seeded sample is reproducible\n",
    "\n",
    "        random.seed(random_seed)\n",
    "        # Randomly draw student ids; those students become the test set.\n",
    "        test_keys = set(random.sample(sorted_keys, int(len(sorted_keys) * sample_rate)))\n",
    "\n",
    "        # Three-level lists: each inner list is one student's full answer sequence.\n",
    "        test_seqs = [seqs_by_student[k] for k in seqs_by_student if k in test_keys]\n",
    "        train_seqs = [seqs_by_student[k] for k in seqs_by_student if k not in test_keys]\n",
    "        return train_seqs, test_seqs\n",
    "\n",
    "    def gen_attr(self, is_infer=False):\n",
    "        \"\"\"\n",
    "        Populate the dataset attributes.\n",
    "        :param is_infer: True when the model is used for inference rather than training\n",
    "        :return: None\n",
    "        \"\"\"\n",
    "        if is_infer:\n",
    "            # NOTE(review): infer_seqs keeps the raw {student: seq} dict while\n",
    "            # train/test are lists -- confirm downstream consumers expect a dict.\n",
    "            seqs_by_students, skills = self.read_file()\n",
    "            self.infer_seqs = seqs_by_students\n",
    "        else:\n",
    "            seqs_by_students, skills = self.read_file()\n",
    "            train_seqs, test_seqs = self.split_dataset(seqs_by_students)\n",
    "            self.train_seqs = train_seqs\n",
    "            self.test_seqs = test_seqs\n",
    "\n",
    "        self.gen_dict(skills)  # build the skill id <-> index mappings\n",
    "\n",
    "    def pad_sequences(self, sequences, maxlen=None, value=0.):\n",
    "        # Pad every sequence to the longest length in the batch; `sequences` is a nested list.\n",
    "        lengths = [len(s) for s in sequences]\n",
    "        nb_samples = len(sequences)\n",
    "        # When maxlen is not given, use the longest sequence in the batch.\n",
    "        if maxlen is None:\n",
    "            maxlen = np.max(lengths)\n",
    "        # Build the padded matrix, pre-filled with `value`.\n",
    "        x = (np.ones((nb_samples, maxlen)) * value).astype(np.int32)\n",
    "\n",
    "        # Copy each sequence over the padding.\n",
    "        for idx, s in enumerate(sequences):\n",
    "            trunc = np.asarray(s, dtype=np.int32)\n",
    "            x[idx, :len(trunc)] = trunc\n",
    "\n",
    "        return x\n",
    "\n",
    "    def num_to_one_hot(self, num, dim):\n",
    "        # One-hot encode a question, where dim = num_skills * 2: the first half\n",
    "        # represents wrong answers, the second half correct answers.\n",
    "        # Padding values (num < 0) map to the all-zero vector.\n",
    "        base = np.zeros(dim)\n",
    "        if num >= 0:\n",
    "            base[num] += 1\n",
    "        return base\n",
    "\n",
    "    def format_data(self, seqs):\n",
    "        # Build model inputs and targets: inputs are the first n-1 events of each\n",
    "        # sequence, targets are the last n-1 events.\n",
    "\n",
    "        # Per-sequence lengths; sequences keep their true lengths and\n",
    "        # tf.nn.dynamic_rnn consumes those lengths downstream.\n",
    "        seq_len = np.array(list(map(lambda seq: len(seq) - 1, seqs)))\n",
    "        max_len = max(seq_len)  # longest sequence in this batch\n",
    "        # Keep the ragged per-student sequences as plain Python lists:\n",
    "        # np.array() on ragged nested lists raises ValueError on NumPy >= 1.24,\n",
    "        # and pad_sequences only needs len() and iteration anyway.\n",
    "        # Event j is encoded as skills_to_int[skill_id] + num_skills * is_correct.\n",
    "        x_sequences = [[(self.skills_to_int[j[0]] + self.num_skills * j[1]) for j in i[:-1]] for i in seqs]\n",
    "        # Pad inputs with -1 up to the batch maximum (-1 one-hot encodes to all zeros).\n",
    "        x = self.pad_sequences(x_sequences, maxlen=max_len, value=-1)\n",
    "\n",
    "        # input_x: one-hot tensor of shape (batch_size, max_len, num_skills * 2).\n",
    "        input_x = np.array([[self.num_to_one_hot(j, self.num_skills * 2) for j in i] for i in x])\n",
    "\n",
    "        # Target skill indices: the last len(i)-1 skill ids of each sequence, padded with 0.\n",
    "        target_id_seqs = [[self.skills_to_int[j[0]] for j in i[1:]] for i in seqs]\n",
    "        target_id = self.pad_sequences(target_id_seqs, maxlen=max_len, value=0)\n",
    "\n",
    "        # Target correctness labels, padded the same way.\n",
    "        target_correctness_seqs = [[j[1] for j in i[1:]] for i in seqs]\n",
    "        target_correctness = self.pad_sequences(target_correctness_seqs, maxlen=max_len, value=0)\n",
    "\n",
    "        return dict(input_x=input_x, target_id=target_id, target_correctness=target_correctness,\n",
    "                    seq_len=seq_len, max_len=max_len)\n",
    "\n",
    "    def next_batch(self, seqs):\n",
    "        # Yield formatted batches of batch_size sequences; a trailing partial batch is dropped.\n",
    "        length = len(seqs)\n",
    "        num_batchs = length // self.batch_size\n",
    "        start = 0\n",
    "        for i in range(num_batchs):\n",
    "            batch_seqs = seqs[start: start + self.batch_size]\n",
    "            start += self.batch_size\n",
    "            params = self.format_data(batch_seqs)\n",
    "\n",
    "            yield params"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_x shape: (10, 64, 534)\n",
      "[ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  1.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.\n",
      "  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]\n"
     ]
    }
   ],
   "source": [
    "# Smoke-test the data pipeline on a single batch\n",
    "fileName = \"./data/knowledgeTracing.csv\"\n",
    "dataGen = DataGenerator(fileName, config)\n",
    "dataGen.gen_attr()\n",
    "train_seqs = dataGen.train_seqs\n",
    "params = next(dataGen.next_batch(train_seqs))\n",
    "# train_seqs / test_seqs are plain Python lists, so report len() (a list has no .shape)\n",
    "print(\"train_seqs: {}\".format(len(dataGen.train_seqs)))\n",
    "print(\"test_seqs: {}\".format(len(dataGen.test_seqs)))\n",
    "print(\"input_x shape: {}\".format(params['input_x'].shape))\n",
    "print(params[\"input_x\"][0][0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the model\n",
    "class TensorFlowDKT(object):\n",
    "    \"\"\"Deep Knowledge Tracing model (TF1 graph): a stacked LSTM over one-hot\n",
    "    (skill, correctness) events that predicts per-skill mastery at each step.\"\"\"\n",
    "\n",
    "    def __init__(self, config):\n",
    "        # Unpack the configured hyper-parameters\n",
    "        self.hiddens = hiddens = config.modelConfig.hidden_layers\n",
    "        self.num_skills = num_skills = config.num_skills\n",
    "        self.input_size = input_size = config.input_size\n",
    "        self.batch_size = batch_size = config.batch_size\n",
    "        self.keep_prob_value = config.modelConfig.dropout_keep_prob\n",
    "\n",
    "        # Placeholders fed to the model at run time\n",
    "        self.max_steps = tf.placeholder(tf.int32, name=\"max_steps\")  # longest sequence length in the current batch\n",
    "        self.input_data = tf.placeholder(tf.float32, [batch_size, None, input_size], name=\"input_x\")\n",
    "\n",
    "        self.sequence_len = tf.placeholder(tf.int32, [batch_size], name=\"sequence_len\")\n",
    "        self.keep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")  # dropout keep prob\n",
    "\n",
    "        self.target_id = tf.placeholder(tf.int32, [batch_size, None], name=\"target_id\")\n",
    "        self.target_correctness = tf.placeholder(tf.float32, [batch_size, None], name=\"target_correctness\")\n",
    "        self.flat_target_correctness = None\n",
    "\n",
    "        # Build the (stacked) LSTM structure\n",
    "        hidden_layers = []\n",
    "        for idx, hidden_size in enumerate(hiddens):\n",
    "            lstm_layer = tf.nn.rnn_cell.LSTMCell(num_units=hidden_size, state_is_tuple=True)\n",
    "            hidden_layer = tf.nn.rnn_cell.DropoutWrapper(cell=lstm_layer,\n",
    "                                                         output_keep_prob=self.keep_prob)\n",
    "            hidden_layers.append(hidden_layer)\n",
    "        self.hidden_cell = tf.nn.rnn_cell.MultiRNNCell(cells=hidden_layers, state_is_tuple=True)\n",
    "\n",
    "        # Dynamic RNN so every sequence can keep its true (variable) length\n",
    "        outputs, self.current_state = tf.nn.dynamic_rnn(cell=self.hidden_cell,\n",
    "                                                        inputs=self.input_data,\n",
    "                                                        sequence_length=self.sequence_len,\n",
    "                                                        dtype=tf.float32)\n",
    "\n",
    "        # Hidden-to-output weights, shape [last hidden layer size, num_skills]\n",
    "        output_w = tf.get_variable(\"W\", [hiddens[-1], num_skills])\n",
    "        output_b = tf.get_variable(\"b\", [num_skills])\n",
    "\n",
    "        self.output = tf.reshape(outputs, [batch_size * self.max_steps, hiddens[-1]])\n",
    "        # Weights are shared across time steps: b is broadcast-added to every row\n",
    "        # of the resulting [batch_size * max_steps, num_skills] matrix\n",
    "        self.logits = tf.matmul(self.output, output_w) + output_b\n",
    "\n",
    "        self.mat_logits = tf.reshape(self.logits, [batch_size, self.max_steps, num_skills])\n",
    "\n",
    "        # Sigmoid over every logit: each value is the predicted mastery of one skill\n",
    "        # at one time step; every step outputs the mastery of all skills.\n",
    "        self.pred_all = tf.sigmoid(self.mat_logits, name=\"pred_all\")\n",
    "\n",
    "        # Compute the loss\n",
    "        flat_logits = tf.reshape(self.logits, [-1])\n",
    "\n",
    "        flat_target_correctness = tf.reshape(self.target_correctness, [-1])\n",
    "        self.flat_target_correctness = flat_target_correctness\n",
    "\n",
    "        flat_base_target_index = tf.range(batch_size * self.max_steps) * num_skills\n",
    "\n",
    "        # flat_logits has length batch_size * max_steps * num_skills; the per-step\n",
    "        # target_id selects one logit per step, giving batch_size * max_steps values\n",
    "        flat_base_target_id = tf.reshape(self.target_id, [-1])\n",
    "\n",
    "        flat_target_id = flat_base_target_id + flat_base_target_index\n",
    "        # gather slices the selected logits out of the flattened tensor\n",
    "        flat_target_logits = tf.gather(flat_logits, flat_target_id)\n",
    "\n",
    "        # Sigmoid of the gathered logits: predicted probability of a correct answer\n",
    "        self.pred = tf.sigmoid(tf.reshape(flat_target_logits, [batch_size, self.max_steps]), name=\"pred\")\n",
    "        # Binarize the sigmoid predictions at the 0.5 threshold\n",
    "        self.binary_pred = tf.cast(tf.greater_equal(self.pred, 0.5), tf.float32, name=\"binary_pred\")\n",
    "\n",
    "        # Define the loss function\n",
    "        with tf.name_scope(\"loss\"):\n",
    "            # flat_target_logits_sigmoid = tf.nn.log_softmax(flat_target_logits)\n",
    "            # self.loss = -tf.reduce_mean(flat_target_correctness * flat_target_logits_sigmoid)\n",
    "            self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=flat_target_correctness,\n",
    "                                                                               logits=flat_target_logits))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Summary name dkt/rnn/multi_rnn_cell/cell_0/lstm_cell/kernel:0/grad/hist is illegal; using dkt/rnn/multi_rnn_cell/cell_0/lstm_cell/kernel_0/grad/hist instead.\n",
      "INFO:tensorflow:Summary name dkt/rnn/multi_rnn_cell/cell_0/lstm_cell/kernel:0/grad/sparsity is illegal; using dkt/rnn/multi_rnn_cell/cell_0/lstm_cell/kernel_0/grad/sparsity instead.\n",
      "INFO:tensorflow:Summary name dkt/rnn/multi_rnn_cell/cell_0/lstm_cell/bias:0/grad/hist is illegal; using dkt/rnn/multi_rnn_cell/cell_0/lstm_cell/bias_0/grad/hist instead.\n",
      "INFO:tensorflow:Summary name dkt/rnn/multi_rnn_cell/cell_0/lstm_cell/bias:0/grad/sparsity is illegal; using dkt/rnn/multi_rnn_cell/cell_0/lstm_cell/bias_0/grad/sparsity instead.\n",
      "INFO:tensorflow:Summary name dkt/W:0/grad/hist is illegal; using dkt/W_0/grad/hist instead.\n",
      "INFO:tensorflow:Summary name dkt/W:0/grad/sparsity is illegal; using dkt/W_0/grad/sparsity instead.\n",
      "INFO:tensorflow:Summary name dkt/b:0/grad/hist is illegal; using dkt/b_0/grad/hist instead.\n",
      "INFO:tensorflow:Summary name dkt/b:0/grad/sparsity is illegal; using dkt/b_0/grad/sparsity instead.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/jiangxinyang848/anaconda3/envs/jiang/lib/python3.5/site-packages/tensorflow/python/ops/gradients_impl.py:96: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.\n",
      "  \"Converting sparse IndexedSlices to a dense Tensor of unknown shape. \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "writing to /data4T/share/jiangxinyang848/dkt/runs/1540468172\n",
      "初始化完毕，开始训练\n",
      "train: 2018-10-25T19:49:33.935153: step 1, loss 0.6947927474975586, acc 0.48619447779111646, auc: 0.4918510713031261\n",
      "train: 2018-10-25T19:49:34.348814: step 2, loss 0.6826914548873901, acc 0.6371428571428571, auc: 0.6983210827963604\n",
      "train: 2018-10-25T19:49:34.704649: step 3, loss 0.6839419603347778, acc 0.6334250343878954, auc: 0.6529206054989187\n",
      "train: 2018-10-25T19:49:35.045240: step 4, loss 0.5792971253395081, acc 0.7582903463522476, auc: 0.7443520990050958\n",
      "train: 2018-10-25T19:49:35.680104: step 5, loss 0.6496233344078064, acc 0.685807150595883, auc: 0.6368848386233709\n",
      "train: 2018-10-25T19:49:36.113332: step 6, loss 0.7021452784538269, acc 0.6653771760154739, auc: 0.6905129335684891\n",
      "train: 2018-10-25T19:49:36.550396: step 7, loss 0.6026129126548767, acc 0.7809239940387481, auc: 0.6576149582242394\n",
      "train: 2018-10-25T19:49:37.134755: step 8, loss 0.6327204704284668, acc 0.7117493472584856, auc: 0.6644625131488314\n",
      "train: 2018-10-25T19:49:37.597835: step 9, loss 0.6014018654823303, acc 0.77, auc: 0.707541767215839\n",
      "train: 2018-10-25T19:49:38.154094: step 10, loss 0.6165726780891418, acc 0.7207155222158107, auc: 0.6351928246720246\n",
      "train: 2018-10-25T19:49:38.678853: step 11, loss 0.6529389023780823, acc 0.6554121151936445, auc: 0.6504357587867599\n",
      "train: 2018-10-25T19:49:39.022104: step 12, loss 0.6231943964958191, acc 0.6529877609791217, auc: 0.7281577733217397\n",
      "train: 2018-10-25T19:49:39.333254: step 13, loss 0.5895382761955261, acc 0.7168304668304668, auc: 0.6838728168714578\n",
      "train: 2018-10-25T19:49:39.751422: step 14, loss 0.5997651815414429, acc 0.7457044673539519, auc: 0.625971766806755\n",
      "train: 2018-10-25T19:49:40.091650: step 15, loss 0.5528448224067688, acc 0.8033306899286281, auc: 0.6654983951425777\n",
      "train: 2018-10-25T19:49:40.471957: step 16, loss 0.597986102104187, acc 0.6807039597737272, auc: 0.7217482710047766\n",
      "train: 2018-10-25T19:49:40.695669: step 17, loss 0.590935468673706, acc 0.7642045454545454, auc: 0.7069684447783622\n",
      "train: 2018-10-25T19:49:41.331840: step 18, loss 0.625268280506134, acc 0.6052631578947368, auc: 0.7132066728452271\n",
      "train: 2018-10-25T19:49:41.691883: step 19, loss 0.570222795009613, acc 0.7894211576846307, auc: 0.6027304157678556\n",
      "train: 2018-10-25T19:49:42.010116: step 20, loss 0.5858055949211121, acc 0.7237704918032787, auc: 0.6384311815203885\n",
      "train: 2018-10-25T19:49:42.434015: step 21, loss 0.5624483227729797, acc 0.7230576441102757, auc: 0.6497057369633548\n",
      "train: 2018-10-25T19:49:42.780596: step 22, loss 0.5691002011299133, acc 0.7606284658040665, auc: 0.6092040535222353\n",
      "train: 2018-10-25T19:49:43.188781: step 23, loss 0.5376462340354919, acc 0.7894327894327894, auc: 0.6335186278368097\n",
      "train: 2018-10-25T19:49:43.527572: step 24, loss 0.5378552079200745, acc 0.7657021877205363, auc: 0.7596030729833547\n",
      "train: 2018-10-25T19:49:43.752967: step 25, loss 0.528416097164154, acc 0.7671092951991828, auc: 0.6623027330617411\n",
      "train: 2018-10-25T19:49:44.141137: step 26, loss 0.5819234848022461, acc 0.7195496417604913, auc: 0.6935474650074797\n",
      "train: 2018-10-25T19:49:44.625042: step 27, loss 0.5510955452919006, acc 0.7392359005457854, auc: 0.7125337112935294\n",
      "train: 2018-10-25T19:49:44.933441: step 28, loss 0.47637003660202026, acc 0.8691423519009726, auc: 0.6816887167290904\n",
      "train: 2018-10-25T19:49:45.268979: step 29, loss 0.5444242358207703, acc 0.767052767052767, auc: 0.6776368978368466\n",
      "train: 2018-10-25T19:49:45.758867: step 30, loss 0.5473979115486145, acc 0.7352941176470589, auc: 0.6819233630952382\n",
      "train: 2018-10-25T19:49:46.238259: step 31, loss 0.5372132658958435, acc 0.7639109697933227, auc: 0.6923601688527061\n",
      "train: 2018-10-25T19:49:46.664342: step 32, loss 0.5671365261077881, acc 0.7205408886027045, auc: 0.640571469157879\n",
      "train: 2018-10-25T19:49:47.055269: step 33, loss 0.5486971735954285, acc 0.7601593625498008, auc: 0.5723512068588491\n",
      "train: 2018-10-25T19:49:47.456008: step 34, loss 0.5138095617294312, acc 0.7370012091898428, auc: 0.7915533915779256\n",
      "train: 2018-10-25T19:49:47.824180: step 35, loss 0.44218873977661133, acc 0.8683853459972863, auc: 0.6918294814602364\n",
      "train: 2018-10-25T19:49:48.179005: step 36, loss 0.5256248712539673, acc 0.7673325499412456, auc: 0.6995637198622273\n",
      "train: 2018-10-25T19:49:48.764197: step 37, loss 0.5344474911689758, acc 0.7591652566271856, auc: 0.6336233488329461\n",
      "train: 2018-10-25T19:49:49.095139: step 38, loss 0.48788779973983765, acc 0.8119521912350598, auc: 0.7009521299377326\n",
      "train: 2018-10-25T19:49:49.379935: step 39, loss 0.41041743755340576, acc 0.8687089715536105, auc: 0.711563367252544\n",
      "train: 2018-10-25T19:49:49.784479: step 40, loss 0.6294397711753845, acc 0.5736526946107784, auc: 0.5865196089150118\n",
      "train: 2018-10-25T19:49:50.745360: step 41, loss 0.5306633710861206, acc 0.6909914443885254, auc: 0.7542336241410876\n",
      "train: 2018-10-25T19:49:51.270503: step 42, loss 0.5348879098892212, acc 0.7120743034055728, auc: 0.6959355260449207\n",
      "train: 2018-10-25T19:49:51.512678: step 43, loss 0.532840371131897, acc 0.7252861602497399, auc: 0.6662388363014065\n",
      "train: 2018-10-25T19:49:51.817780: step 44, loss 0.4824948310852051, acc 0.7822525597269625, auc: 0.7960659777703201\n",
      "train: 2018-10-25T19:49:52.135584: step 45, loss 0.46809786558151245, acc 0.8111498257839721, auc: 0.7506546664871941\n",
      "train: 2018-10-25T19:49:52.592211: step 46, loss 0.49974292516708374, acc 0.7731034482758621, auc: 0.7194638609677932\n",
      "train: 2018-10-25T19:49:53.098154: step 47, loss 0.5091418027877808, acc 0.7097046413502109, auc: 0.7104015929104268\n",
      "train: 2018-10-25T19:49:53.429609: step 48, loss 0.5420015454292297, acc 0.6966292134831461, auc: 0.7539399464988829\n",
      "train: 2018-10-25T19:49:53.865002: step 49, loss 0.5152373909950256, acc 0.7309594460929772, auc: 0.7456964302622857\n",
      "train: 2018-10-25T19:49:54.465170: step 50, loss 0.5183131098747253, acc 0.697961373390558, auc: 0.6722971435192273\n",
      "train: 2018-10-25T19:49:54.793768: step 51, loss 0.5310993194580078, acc 0.6900647948164147, auc: 0.6753621830897963\n",
      "train: 2018-10-25T19:49:55.283283: step 52, loss 0.510566234588623, acc 0.7334978408389883, auc: 0.6789223266325681\n",
      "train: 2018-10-25T19:49:55.763735: step 53, loss 0.48147016763687134, acc 0.7642225031605563, auc: 0.7775915194857662\n",
      "train: 2018-10-25T19:49:56.035028: step 54, loss 0.43849751353263855, acc 0.8134453781512605, auc: 0.9144247467102515\n",
      "train: 2018-10-25T19:49:56.396937: step 55, loss 0.43666359782218933, acc 0.8001700680272109, auc: 0.8834598509645518\n",
      "train: 2018-10-25T19:49:56.907200: step 56, loss 0.5093433856964111, acc 0.7459627329192546, auc: 0.6243112802297976\n",
      "train: 2018-10-25T19:49:57.262738: step 57, loss 0.419361412525177, acc 0.8734491315136477, auc: 0.6864928554101353\n",
      "train: 2018-10-25T19:49:57.636039: step 58, loss 0.4449378252029419, acc 0.8419388830347735, auc: 0.6690353671485747\n",
      "train: 2018-10-25T19:49:58.055044: step 59, loss 0.49820569157600403, acc 0.7091945830363506, auc: 0.7099518825112312\n",
      "train: 2018-10-25T19:49:58.474446: step 60, loss 0.4550091624259949, acc 0.8188679245283019, auc: 0.7218984095788716\n",
      "train: 2018-10-25T19:49:58.977601: step 61, loss 0.5223017334938049, acc 0.6504599211563732, auc: 0.6772405763249582\n",
      "train: 2018-10-25T19:49:59.337790: step 62, loss 0.4436118006706238, acc 0.8330275229357799, auc: 0.7528332289441706\n",
      "train: 2018-10-25T19:49:59.656510: step 63, loss 0.4468002915382385, acc 0.7982799061767005, auc: 0.7685721435721435\n",
      "train: 2018-10-25T19:50:00.009017: step 64, loss 0.42147642374038696, acc 0.8511904761904762, auc: 0.7577167838947942\n",
      "train: 2018-10-25T19:50:00.444438: step 65, loss 0.44146019220352173, acc 0.8034875922199866, auc: 0.7758739877542958\n",
      "train: 2018-10-25T19:50:00.938233: step 66, loss 0.47589555382728577, acc 0.7049754730203224, auc: 0.7327515595875338\n",
      "train: 2018-10-25T19:50:01.446934: step 67, loss 0.43657541275024414, acc 0.8052023121387283, auc: 0.7697454636358487\n",
      "train: 2018-10-25T19:50:01.885780: step 68, loss 0.4614270329475403, acc 0.7716948165404776, auc: 0.744489108117763\n",
      "train: 2018-10-25T19:50:02.355042: step 69, loss 0.4559076428413391, acc 0.7793840351979887, auc: 0.7001166755194784\n",
      "train: 2018-10-25T19:50:02.743893: step 70, loss 0.4043509066104889, acc 0.8326666666666667, auc: 0.7742075051269292\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:50:03.061089: step 71, loss 0.44575297832489014, acc 0.7790575916230367, auc: 0.7659228432709096\n",
      "train: 2018-10-25T19:50:03.586987: step 72, loss 0.4188094437122345, acc 0.8216258879242304, auc: 0.7884786377984263\n",
      "train: 2018-10-25T19:50:03.942773: step 73, loss 0.42349565029144287, acc 0.8021555042340262, auc: 0.8061997440249298\n",
      "train: 2018-10-25T19:50:04.428370: step 74, loss 0.41317886114120483, acc 0.8313253012048193, auc: 0.7348704914064391\n",
      "train: 2018-10-25T19:50:04.631821: step 75, loss 0.4182058274745941, acc 0.8059701492537313, auc: 0.7992179487179487\n",
      "train: 2018-10-25T19:50:05.144946: step 76, loss 0.47731468081474304, acc 0.7345739471106758, auc: 0.6866062077585791\n",
      "train: 2018-10-25T19:50:05.563459: step 77, loss 0.42427000403404236, acc 0.7962447844228094, auc: 0.8876734366388952\n",
      "train: 2018-10-25T19:50:06.163138: step 78, loss 0.5819935202598572, acc 0.5634813817855541, auc: 0.5255064451999883\n",
      "train: 2018-10-25T19:50:06.872868: step 79, loss 0.4482227861881256, acc 0.7474003466204506, auc: 0.8134954307968225\n",
      "train: 2018-10-25T19:50:07.249361: step 80, loss 0.4307433068752289, acc 0.7987055016181229, auc: 0.7371872779145339\n",
      "train: 2018-10-25T19:50:07.584510: step 81, loss 0.47874245047569275, acc 0.7423469387755102, auc: 0.6556495769039323\n",
      "train: 2018-10-25T19:50:08.164634: step 82, loss 0.5053662657737732, acc 0.6460055096418733, auc: 0.7140531462538412\n",
      "train: 2018-10-25T19:50:08.476809: step 83, loss 0.40283238887786865, acc 0.819646968534152, auc: 0.7842906817096467\n",
      "train: 2018-10-25T19:50:08.872080: step 84, loss 0.4403352737426758, acc 0.7940771349862259, auc: 0.7210395197260921\n",
      "train: 2018-10-25T19:50:09.193441: step 85, loss 0.41899943351745605, acc 0.8068965517241379, auc: 0.847533738191633\n",
      "train: 2018-10-25T19:50:10.162098: step 86, loss 0.41156718134880066, acc 0.7477628635346756, auc: 0.7987848467936964\n",
      "train: 2018-10-25T19:50:10.525981: step 87, loss 0.3682765066623688, acc 0.8544169611307421, auc: 0.7774573643410854\n",
      "train: 2018-10-25T19:50:11.193132: step 88, loss 0.4004213511943817, acc 0.8096804511278195, auc: 0.8024606167523128\n",
      "train: 2018-10-25T19:50:11.514047: step 89, loss 0.4425496459007263, acc 0.7511380880121397, auc: 0.7847865849401712\n",
      "train: 2018-10-25T19:50:11.836439: step 90, loss 0.3660072088241577, acc 0.8486932599724897, auc: 0.8844751543663628\n",
      "train: 2018-10-25T19:50:12.251396: step 91, loss 0.45846322178840637, acc 0.7217962760131434, auc: 0.7575723339323501\n",
      "train: 2018-10-25T19:50:12.560559: step 92, loss 0.440854012966156, acc 0.7562962962962962, auc: 0.7934008339446353\n",
      "train: 2018-10-25T19:50:12.830535: step 93, loss 0.411677747964859, acc 0.7985347985347986, auc: 0.8278372966207759\n",
      "train: 2018-10-25T19:50:13.233702: step 94, loss 0.39913877844810486, acc 0.7912621359223301, auc: 0.7693685624720107\n",
      "train: 2018-10-25T19:50:13.575169: step 95, loss 0.3589117228984833, acc 0.8375451263537906, auc: 0.885426400225162\n",
      "train: 2018-10-25T19:50:13.906989: step 96, loss 0.4063332676887512, acc 0.7932359723289777, auc: 0.8444922711324837\n",
      "train: 2018-10-25T19:50:14.229693: step 97, loss 0.3862883150577545, acc 0.8047105004906772, auc: 0.7716097527891757\n",
      "train: 2018-10-25T19:50:14.579853: step 98, loss 0.4155576229095459, acc 0.78701504354711, auc: 0.7870420390125161\n",
      "train: 2018-10-25T19:50:14.921229: step 99, loss 0.40104156732559204, acc 0.7447852760736197, auc: 0.8181917801155115\n",
      "train: 2018-10-25T19:50:15.292849: step 100, loss 0.38217565417289734, acc 0.8236607142857143, auc: 0.8456079267673471\n",
      "\n",
      "Evaluation:\n",
      "dev: 2018-10-25T19:50:16.787765, step: 100, loss: 0.39060261960213, acc: 0.7987418900139605, auc: 0.7649458321111233\n",
      "Saved model checkpoint to model/my-model-100\n",
      "\n",
      "train: 2018-10-25T19:50:17.311710: step 101, loss 0.4166518449783325, acc 0.7842712842712842, auc: 0.7690017788008789\n",
      "train: 2018-10-25T19:50:17.675560: step 102, loss 0.360922634601593, acc 0.8422077922077922, auc: 0.8293376676147907\n",
      "train: 2018-10-25T19:50:18.040043: step 103, loss 0.3565255105495453, acc 0.8282747603833865, auc: 0.8371191960547129\n",
      "train: 2018-10-25T19:50:18.375387: step 104, loss 0.428481787443161, acc 0.7748031496062993, auc: 0.7967506935369237\n",
      "train: 2018-10-25T19:50:18.679763: step 105, loss 0.3992761969566345, acc 0.7947136563876652, auc: 0.7472267154587962\n",
      "train: 2018-10-25T19:50:19.025550: step 106, loss 0.3664594888687134, acc 0.8045178105994787, auc: 0.888472214019961\n",
      "train: 2018-10-25T19:50:19.614889: step 107, loss 0.39658066630363464, acc 0.7637655417406749, auc: 0.7690018084874496\n",
      "train: 2018-10-25T19:50:19.963966: step 108, loss 0.3542632758617401, acc 0.8504213483146067, auc: 0.8052595971544783\n",
      "train: 2018-10-25T19:50:20.308676: step 109, loss 0.3445909023284912, acc 0.844306049822064, auc: 0.9093779439472445\n",
      "train: 2018-10-25T19:50:20.738409: step 110, loss 0.34786441922187805, acc 0.8334771354616048, auc: 0.8535043048737594\n",
      "train: 2018-10-25T19:50:21.000327: step 111, loss 0.3452257513999939, acc 0.8503274087932647, auc: 0.8463590988521141\n",
      "train: 2018-10-25T19:50:21.372072: step 112, loss 0.3609761893749237, acc 0.8384415584415584, auc: 0.7984579482017022\n",
      "train: 2018-10-25T19:50:21.903778: step 113, loss 0.3644852340221405, acc 0.7691043549712407, auc: 0.786820840619912\n",
      "train: 2018-10-25T19:50:22.535352: step 114, loss 0.3631448447704315, acc 0.7725421756835369, auc: 0.8237461234960789\n",
      "train: 2018-10-25T19:50:22.938740: step 115, loss 0.3801192343235016, acc 0.8051058530510585, auc: 0.8338660492432799\n",
      "train: 2018-10-25T19:50:23.286959: step 116, loss 0.3788183033466339, acc 0.8293436293436294, auc: 0.784881545520786\n",
      "train: 2018-10-25T19:50:23.652784: step 117, loss 0.29691922664642334, acc 0.8842345773038842, auc: 0.8671893739505876\n",
      "train: 2018-10-25T19:50:23.973063: step 118, loss 0.3355104923248291, acc 0.853, auc: 0.7919615773508595\n",
      "train: 2018-10-25T19:50:24.476013: step 119, loss 0.36868900060653687, acc 0.793854033290653, auc: 0.8502226439886122\n",
      "train: 2018-10-25T19:50:24.981493: step 120, loss 0.3401728868484497, acc 0.8201811125485123, auc: 0.8367956811890949\n",
      "train: 2018-10-25T19:50:25.339129: step 121, loss 0.3501584827899933, acc 0.8212927756653993, auc: 0.8855874925727867\n",
      "train: 2018-10-25T19:50:26.246456: step 122, loss 0.338970422744751, acc 0.8190932868352223, auc: 0.8529043381932532\n",
      "train: 2018-10-25T19:50:26.618195: step 123, loss 0.4110669493675232, acc 0.7553897180762852, auc: 0.7600641797031689\n",
      "train: 2018-10-25T19:50:26.974087: step 124, loss 0.38029924035072327, acc 0.8119087367835282, auc: 0.7681166061411447\n",
      "train: 2018-10-25T19:50:27.414998: step 125, loss 0.3782939612865448, acc 0.7908666320705761, auc: 0.7859955166089261\n",
      "train: 2018-10-25T19:50:27.736295: step 126, loss 0.31413015723228455, acc 0.8631984585741811, auc: 0.9091736415861251\n",
      "train: 2018-10-25T19:50:28.125592: step 127, loss 0.3864891529083252, acc 0.7959905660377359, auc: 0.8060533034971454\n",
      "train: 2018-10-25T19:50:28.550049: step 128, loss 0.31255364418029785, acc 0.8730769230769231, auc: 0.8323950101234406\n",
      "train: 2018-10-25T19:50:29.010100: step 129, loss 0.34346145391464233, acc 0.8207088255733148, auc: 0.7570258668390735\n",
      "train: 2018-10-25T19:50:29.451627: step 130, loss 0.3497990071773529, acc 0.7849056603773585, auc: 0.8584107384030208\n",
      "train: 2018-10-25T19:50:29.886961: step 131, loss 0.3482651114463806, acc 0.8016643550624133, auc: 0.8335917055410171\n",
      "train: 2018-10-25T19:50:30.351969: step 132, loss 0.33102652430534363, acc 0.8234432234432234, auc: 0.8240215588723052\n",
      "train: 2018-10-25T19:50:30.795112: step 133, loss 0.3781795799732208, acc 0.7820927723840345, auc: 0.7646326168874271\n",
      "train: 2018-10-25T19:50:31.424128: step 134, loss 0.3337695598602295, acc 0.7453874538745388, auc: 0.7809573713458735\n",
      "train: 2018-10-25T19:50:31.723862: step 135, loss 0.34347808361053467, acc 0.8332155477031802, auc: 0.7950433255716304\n",
      "train: 2018-10-25T19:50:32.081070: step 136, loss 0.35686442255973816, acc 0.8347055098163395, auc: 0.852916882767629\n",
      "train: 2018-10-25T19:50:32.440978: step 137, loss 0.3306722044944763, acc 0.8596491228070176, auc: 0.7437351913930182\n",
      "train: 2018-10-25T19:50:32.888751: step 138, loss 0.35966646671295166, acc 0.7696674776966748, auc: 0.8094054168429963\n",
      "train: 2018-10-25T19:50:33.214319: step 139, loss 0.37127724289894104, acc 0.8115501519756839, auc: 0.7727823206189385\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:50:33.648530: step 140, loss 0.31492897868156433, acc 0.8316755929609794, auc: 0.8659561322211926\n",
      "train: 2018-10-25T19:50:33.966876: step 141, loss 0.3292032480239868, acc 0.8365724381625441, auc: 0.8041174336650083\n",
      "train: 2018-10-25T19:50:34.450675: step 142, loss 0.33803704380989075, acc 0.796236559139785, auc: 0.8286320531629785\n",
      "train: 2018-10-25T19:50:34.857635: step 143, loss 0.32796889543533325, acc 0.825287356321839, auc: 0.8828642831306341\n",
      "train: 2018-10-25T19:50:35.340654: step 144, loss 0.317012220621109, acc 0.8214793741109531, auc: 0.8155734884458526\n",
      "train: 2018-10-25T19:50:35.630550: step 145, loss 0.3441636264324188, acc 0.8243840808591283, auc: 0.8107985227050167\n",
      "train: 2018-10-25T19:50:35.911809: step 146, loss 0.3596339821815491, acc 0.773394495412844, auc: 0.7932408229495147\n",
      "train: 2018-10-25T19:50:36.368825: step 147, loss 0.35572630167007446, acc 0.759175788795879, auc: 0.8088736222775286\n",
      "train: 2018-10-25T19:50:36.748172: step 148, loss 0.30913957953453064, acc 0.8525121555915721, auc: 0.815068987936141\n",
      "train: 2018-10-25T19:50:37.129427: step 149, loss 0.297717809677124, acc 0.8733905579399142, auc: 0.7800032232648091\n",
      "train: 2018-10-25T19:50:37.433587: step 150, loss 0.30060359835624695, acc 0.8504950495049505, auc: 0.7726793248945147\n",
      "train: 2018-10-25T19:50:37.765721: step 151, loss 0.333886057138443, acc 0.8107457898957497, auc: 0.8382707219754575\n",
      "train: 2018-10-25T19:50:38.249839: step 152, loss 0.3237711489200592, acc 0.7765505522514868, auc: 0.8539843962047319\n",
      "train: 2018-10-25T19:50:38.846570: step 153, loss 0.31094449758529663, acc 0.804752729608221, auc: 0.8850970017636685\n",
      "train: 2018-10-25T19:50:39.264759: step 154, loss 0.3126688003540039, acc 0.7966252220248667, auc: 0.7931731096072432\n",
      "train: 2018-10-25T19:50:39.934429: step 155, loss 0.3332471251487732, acc 0.771156735102654, auc: 0.8323059264972759\n",
      "train: 2018-10-25T19:50:40.262125: step 156, loss 0.2715548574924469, acc 0.8886054421768708, auc: 0.8610532324818039\n",
      "train: 2018-10-25T19:50:40.797857: step 157, loss 0.320186048746109, acc 0.8143982002249719, auc: 0.821552865096232\n",
      "train: 2018-10-25T19:50:41.045544: step 158, loss 0.3134891092777252, acc 0.8160535117056856, auc: 0.8102009381782538\n",
      "train: 2018-10-25T19:50:41.446245: step 159, loss 0.3172386586666107, acc 0.8169642857142857, auc: 0.889358953130138\n",
      "train: 2018-10-25T19:50:41.888861: step 160, loss 0.34393712878227234, acc 0.7826603325415677, auc: 0.8153648053424268\n",
      "train: 2018-10-25T19:50:42.306233: step 161, loss 0.3110511302947998, acc 0.8172043010752689, auc: 0.8414277425905332\n",
      "train: 2018-10-25T19:50:42.702090: step 162, loss 0.31290385127067566, acc 0.8633921719109747, auc: 0.8270275593224944\n",
      "train: 2018-10-25T19:50:43.189144: step 163, loss 0.35688725113868713, acc 0.7758707723372035, auc: 0.8261337829468034\n",
      "train: 2018-10-25T19:50:43.619125: step 164, loss 0.3048231303691864, acc 0.8298969072164949, auc: 0.8875730398151923\n",
      "train: 2018-10-25T19:50:43.985773: step 165, loss 0.33978742361068726, acc 0.8327759197324415, auc: 0.8335140294302976\n",
      "train: 2018-10-25T19:50:44.381690: step 166, loss 0.31908318400382996, acc 0.8072383949645948, auc: 0.8021807601163873\n",
      "train: 2018-10-25T19:50:44.744549: step 167, loss 0.3153878152370453, acc 0.8459343794579173, auc: 0.8716672122799359\n",
      "train: 2018-10-25T19:50:45.089026: step 168, loss 0.2857380509376526, acc 0.8662301216089804, auc: 0.76629594119527\n",
      "train: 2018-10-25T19:50:45.663809: step 169, loss 0.32015642523765564, acc 0.7975133214920072, auc: 0.781254456077151\n",
      "train: 2018-10-25T19:50:45.993914: step 170, loss 0.2668471038341522, acc 0.8771626297577855, auc: 0.8641846439341221\n",
      "train: 2018-10-25T19:50:46.349804: step 171, loss 0.34735995531082153, acc 0.8023483365949119, auc: 0.8619383056425653\n",
      "train: 2018-10-25T19:50:46.717175: step 172, loss 0.3128570020198822, acc 0.8202406227883935, auc: 0.8267126539801433\n",
      "train: 2018-10-25T19:50:47.057842: step 173, loss 0.2869724929332733, acc 0.8359621451104101, auc: 0.7959970674486803\n",
      "train: 2018-10-25T19:50:47.517560: step 174, loss 0.2884645164012909, acc 0.7777777777777778, auc: 0.7829124152971416\n",
      "train: 2018-10-25T19:50:48.107560: step 175, loss 0.32350340485572815, acc 0.8247248094834886, auc: 0.8690490898608023\n",
      "train: 2018-10-25T19:50:48.531935: step 176, loss 0.30623459815979004, acc 0.8133230054221534, auc: 0.7640571244878835\n",
      "train: 2018-10-25T19:50:48.816286: step 177, loss 0.23911885917186737, acc 0.8980263157894737, auc: 0.8980010511859751\n",
      "train: 2018-10-25T19:50:49.155015: step 178, loss 0.27847570180892944, acc 0.8709163346613545, auc: 0.8507832786194958\n",
      "train: 2018-10-25T19:50:49.656498: step 179, loss 0.32243043184280396, acc 0.7673521850899743, auc: 0.8032858773672502\n",
      "train: 2018-10-25T19:50:50.090340: step 180, loss 0.28291523456573486, acc 0.8532763532763533, auc: 0.7690836298932383\n",
      "train: 2018-10-25T19:50:50.419762: step 181, loss 0.284092515707016, acc 0.8456561922365989, auc: 0.875520195297125\n",
      "train: 2018-10-25T19:50:50.699408: step 182, loss 0.2754223048686981, acc 0.8820263705759889, auc: 0.7938531346401816\n",
      "train: 2018-10-25T19:50:51.035222: step 183, loss 0.2866080403327942, acc 0.8686030428769018, auc: 0.852852199404627\n",
      "train: 2018-10-25T19:50:51.356370: step 184, loss 0.26117345690727234, acc 0.8640449438202247, auc: 0.8425399730302445\n",
      "train: 2018-10-25T19:50:51.716547: step 185, loss 0.3375237286090851, acc 0.7310774710596616, auc: 0.7975093195690468\n",
      "train: 2018-10-25T19:50:52.041759: step 186, loss 0.3165953755378723, acc 0.793351302785265, auc: 0.8020849234724621\n",
      "train: 2018-10-25T19:50:52.466047: step 187, loss 0.2676754593849182, acc 0.8457742134484886, auc: 0.8674954931187618\n",
      "train: 2018-10-25T19:50:52.774465: step 188, loss 0.32273298501968384, acc 0.8209944751381215, auc: 0.7392418108207581\n",
      "train: 2018-10-25T19:50:53.244667: step 189, loss 0.2725483179092407, acc 0.8286744815148782, auc: 0.831118956181571\n",
      "train: 2018-10-25T19:50:53.841948: step 190, loss 0.2933824360370636, acc 0.7882637628554144, auc: 0.810194955024756\n",
      "train: 2018-10-25T19:50:54.276382: step 191, loss 0.29951298236846924, acc 0.8484082870136432, auc: 0.8498449772926482\n",
      "train: 2018-10-25T19:50:54.613576: step 192, loss 0.23938827216625214, acc 0.8835227272727273, auc: 0.8900717703349283\n",
      "train: 2018-10-25T19:50:55.306032: step 193, loss 0.27912402153015137, acc 0.8050847457627118, auc: 0.8897978422922368\n",
      "train: 2018-10-25T19:50:55.593357: step 194, loss 0.28030675649642944, acc 0.8630824372759857, auc: 0.9054848037406177\n",
      "train: 2018-10-25T19:50:55.981582: step 195, loss 0.33880627155303955, acc 0.7664437012263099, auc: 0.8333280572351134\n",
      "train: 2018-10-25T19:50:56.445739: step 196, loss 0.26147979497909546, acc 0.8504446240905417, auc: 0.8794340656971624\n",
      "train: 2018-10-25T19:50:56.819876: step 197, loss 0.25846943259239197, acc 0.8527054108216433, auc: 0.7992541766109785\n",
      "train: 2018-10-25T19:50:57.145405: step 198, loss 0.28139451146125793, acc 0.838339222614841, auc: 0.8092385057471263\n",
      "train: 2018-10-25T19:50:57.525316: step 199, loss 0.3032362163066864, acc 0.8269370021723389, auc: 0.8061132643876634\n",
      "train: 2018-10-25T19:50:57.848074: step 200, loss 0.330313116312027, acc 0.8212034383954154, auc: 0.8279870207645988\n",
      "\n",
      "Evaluation:\n",
      "dev: 2018-10-25T19:50:59.480361, step: 200, loss: 0.305869996547699, acc: 0.8145344606346401, auc: 0.7917235161328346\n",
      "Saved model checkpoint to model/my-model-200\n",
      "\n",
      "train: 2018-10-25T19:50:59.962247: step 201, loss 0.28572559356689453, acc 0.8465949820788531, auc: 0.8877881708000384\n",
      "train: 2018-10-25T19:51:00.270253: step 202, loss 0.2840760350227356, acc 0.8592644978783592, auc: 0.8328517838614413\n",
      "train: 2018-10-25T19:51:01.147815: step 203, loss 0.23686328530311584, acc 0.8207236842105263, auc: 0.8824613448399856\n",
      "train: 2018-10-25T19:51:01.512915: step 204, loss 0.2995988130569458, acc 0.8345127925966249, auc: 0.8730779266916288\n",
      "train: 2018-10-25T19:51:01.830321: step 205, loss 0.2568877637386322, acc 0.866783523225241, auc: 0.871822686364096\n",
      "train: 2018-10-25T19:51:02.175861: step 206, loss 0.26494279503822327, acc 0.8691834942932397, auc: 0.737050936982701\n",
      "train: 2018-10-25T19:51:02.675669: step 207, loss 0.2881214916706085, acc 0.8132832080200502, auc: 0.8135479797979797\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:51:03.284230: step 208, loss 0.2857397198677063, acc 0.8122727272727273, auc: 0.8413122714300165\n",
      "train: 2018-10-25T19:51:03.704277: step 209, loss 0.29935818910598755, acc 0.812125748502994, auc: 0.8696752491433243\n",
      "train: 2018-10-25T19:51:04.027510: step 210, loss 0.246180459856987, acc 0.884514435695538, auc: 0.8342151296938216\n",
      "train: 2018-10-25T19:51:04.354314: step 211, loss 0.3014737665653229, acc 0.8147540983606557, auc: 0.8327793712098464\n",
      "train: 2018-10-25T19:51:04.765253: step 212, loss 0.3003387749195099, acc 0.8319594166138237, auc: 0.8034891009827314\n",
      "train: 2018-10-25T19:51:05.313920: step 213, loss 0.2778884768486023, acc 0.8277583624563155, auc: 0.8800592521658461\n",
      "train: 2018-10-25T19:51:05.947994: step 214, loss 0.22084015607833862, acc 0.8440559440559441, auc: 0.8683270585963897\n",
      "train: 2018-10-25T19:51:06.367618: step 215, loss 0.2592814266681671, acc 0.8027888446215139, auc: 0.8656956706669899\n",
      "train: 2018-10-25T19:51:06.907742: step 216, loss 0.28077825903892517, acc 0.788896061975468, auc: 0.8305893676045971\n",
      "train: 2018-10-25T19:51:07.247246: step 217, loss 0.265137642621994, acc 0.8537005163511188, auc: 0.8795255390291709\n",
      "train: 2018-10-25T19:51:07.595097: step 218, loss 0.2673957645893097, acc 0.8505843071786311, auc: 0.8910919540229885\n",
      "train: 2018-10-25T19:51:07.928963: step 219, loss 0.2550884485244751, acc 0.8716216216216216, auc: 0.9001391861457329\n",
      "train: 2018-10-25T19:51:08.352167: step 220, loss 0.2977258861064911, acc 0.8103186646433991, auc: 0.7741189131019639\n",
      "train: 2018-10-25T19:51:08.979526: step 221, loss 0.2513846158981323, acc 0.7813455657492355, auc: 0.7749720345173956\n",
      "train: 2018-10-25T19:51:09.456433: step 222, loss 0.278175413608551, acc 0.8590604026845637, auc: 0.8913014123088125\n",
      "train: 2018-10-25T19:51:09.879786: step 223, loss 0.33359912037849426, acc 0.7850033178500332, auc: 0.8342895460903516\n",
      "train: 2018-10-25T19:51:10.238721: step 224, loss 0.2675793170928955, acc 0.828082808280828, auc: 0.8587158756469674\n",
      "train: 2018-10-25T19:51:10.715521: step 225, loss 0.29231005907058716, acc 0.8270114942528736, auc: 0.8371213624338625\n",
      "train: 2018-10-25T19:51:11.106411: step 226, loss 0.29111650586128235, acc 0.7913998170173834, auc: 0.7848339638503572\n",
      "train: 2018-10-25T19:51:11.796702: step 227, loss 0.25722503662109375, acc 0.8224169741697417, auc: 0.8827414486221813\n",
      "train: 2018-10-25T19:51:12.385329: step 228, loss 0.24306604266166687, acc 0.8308870453095086, auc: 0.9074309059849237\n",
      "train: 2018-10-25T19:51:12.831315: step 229, loss 0.30293768644332886, acc 0.7689196995956095, auc: 0.814192507989111\n",
      "train: 2018-10-25T19:51:13.154441: step 230, loss 0.23834361135959625, acc 0.8589211618257261, auc: 0.8895757020757021\n",
      "train: 2018-10-25T19:51:13.610331: step 231, loss 0.24832433462142944, acc 0.8293103448275863, auc: 0.8298434323926791\n",
      "train: 2018-10-25T19:51:14.000116: step 232, loss 0.3037340044975281, acc 0.8452513966480447, auc: 0.8380539984234036\n",
      "train: 2018-10-25T19:51:14.472709: step 233, loss 0.27216216921806335, acc 0.8162746344564527, auc: 0.8570649879787839\n",
      "train: 2018-10-25T19:51:15.122332: step 234, loss 0.25706425309181213, acc 0.7965791081246182, auc: 0.8035805760479803\n",
      "train: 2018-10-25T19:51:15.444910: step 235, loss 0.2793879508972168, acc 0.8439269981120201, auc: 0.8378891137432296\n",
      "train: 2018-10-25T19:51:15.792672: step 236, loss 0.29275912046432495, acc 0.8017391304347826, auc: 0.8409767316017317\n",
      "train: 2018-10-25T19:51:16.533136: step 237, loss 0.20584812760353088, acc 0.8560386473429952, auc: 0.9051236195357585\n",
      "train: 2018-10-25T19:51:16.879075: step 238, loss 0.27318075299263, acc 0.8379594898724682, auc: 0.8543546931407943\n",
      "train: 2018-10-25T19:51:17.094006: step 239, loss 0.27766937017440796, acc 0.8294762484774665, auc: 0.8609258008219332\n",
      "train: 2018-10-25T19:51:17.781331: step 240, loss 0.2290484756231308, acc 0.84251036116045, auc: 0.824913226312452\n",
      "train: 2018-10-25T19:51:18.295903: step 241, loss 0.30128213763237, acc 0.7883104125736738, auc: 0.8127505990792105\n",
      "train: 2018-10-25T19:51:18.809793: step 242, loss 0.2644752562046051, acc 0.7981762917933131, auc: 0.8142921348314607\n",
      "train: 2018-10-25T19:51:19.174982: step 243, loss 0.22964034974575043, acc 0.8517566409597258, auc: 0.9091816608787192\n",
      "train: 2018-10-25T19:51:19.429110: step 244, loss 0.27795782685279846, acc 0.8092643051771117, auc: 0.8344970534444219\n",
      "train: 2018-10-25T19:51:19.988290: step 245, loss 0.26763486862182617, acc 0.8275555555555556, auc: 0.8528472835826049\n",
      "train: 2018-10-25T19:51:20.347001: step 246, loss 0.25705987215042114, acc 0.8502519798416127, auc: 0.8682307280208821\n",
      "train: 2018-10-25T19:51:20.776352: step 247, loss 0.24667632579803467, acc 0.8390557939914163, auc: 0.8734228734228734\n",
      "train: 2018-10-25T19:51:21.204119: step 248, loss 0.2333863079547882, acc 0.8630735615440641, auc: 0.8200351391772787\n",
      "train: 2018-10-25T19:51:21.463485: step 249, loss 0.2761291265487671, acc 0.7722419928825622, auc: 0.7862660944206008\n",
      "train: 2018-10-25T19:51:21.918677: step 250, loss 0.2690891921520233, acc 0.8019726858877086, auc: 0.8146133412042503\n",
      "train: 2018-10-25T19:51:22.445646: step 251, loss 0.27147382497787476, acc 0.7962874821513565, auc: 0.8515679795508035\n",
      "train: 2018-10-25T19:51:22.789553: step 252, loss 0.243943452835083, acc 0.8616600790513834, auc: 0.8750017598445705\n",
      "train: 2018-10-25T19:51:23.256740: step 253, loss 0.22697091102600098, acc 0.8386657590197413, auc: 0.8698024830416003\n",
      "train: 2018-10-25T19:51:23.559036: step 254, loss 0.22951020300388336, acc 0.8729729729729729, auc: 0.870980371046437\n",
      "train: 2018-10-25T19:51:23.920385: step 255, loss 0.24842342734336853, acc 0.8383561643835616, auc: 0.8568804843207944\n",
      "train: 2018-10-25T19:51:24.288419: step 256, loss 0.25724148750305176, acc 0.8189655172413793, auc: 0.8147273672292711\n",
      "train: 2018-10-25T19:51:24.602592: step 257, loss 0.29779890179634094, acc 0.8328964613368283, auc: 0.8030944927843804\n",
      "train: 2018-10-25T19:51:24.912921: step 258, loss 0.22483403980731964, acc 0.869980879541109, auc: 0.8670371164474938\n",
      "train: 2018-10-25T19:51:25.411860: step 259, loss 0.2894323766231537, acc 0.8091265947006869, auc: 0.8491586693877092\n",
      "train: 2018-10-25T19:51:25.890083: step 260, loss 0.22995525598526, acc 0.8562206572769953, auc: 0.8662684846557482\n",
      "train: 2018-10-25T19:51:26.343329: step 261, loss 0.2916969656944275, acc 0.8368286445012788, auc: 0.873381443298969\n",
      "train: 2018-10-25T19:51:26.685414: step 262, loss 0.2737380564212799, acc 0.8201193520886616, auc: 0.8069464201043148\n",
      "train: 2018-10-25T19:51:27.016026: step 263, loss 0.21961849927902222, acc 0.8808808808808809, auc: 0.903975490905582\n",
      "train: 2018-10-25T19:51:27.275389: step 264, loss 0.2712806165218353, acc 0.8331388564760793, auc: 0.832889420918749\n",
      "train: 2018-10-25T19:51:27.610038: step 265, loss 0.2532513439655304, acc 0.8446683459277917, auc: 0.8535180431828475\n",
      "train: 2018-10-25T19:51:28.043943: step 266, loss 0.2762628197669983, acc 0.7909429280397022, auc: 0.7802120169234656\n",
      "train: 2018-10-25T19:51:28.377881: step 267, loss 0.23734427988529205, acc 0.85, auc: 0.9056999475694705\n",
      "train: 2018-10-25T19:51:28.673779: step 268, loss 0.2189626544713974, acc 0.9053254437869822, auc: 0.8902010741233629\n",
      "train: 2018-10-25T19:51:29.045068: step 269, loss 0.16304779052734375, acc 0.9389587073608617, auc: 0.9440999101834823\n",
      "train: 2018-10-25T19:51:29.495603: step 270, loss 0.22578038275241852, acc 0.841642228739003, auc: 0.869567348939821\n",
      "train: 2018-10-25T19:51:29.913746: step 271, loss 0.24591851234436035, acc 0.8228120516499282, auc: 0.8804895829336608\n",
      "train: 2018-10-25T19:51:30.295350: step 272, loss 0.19531722366809845, acc 0.9112627986348123, auc: 0.9136703581187977\n",
      "train: 2018-10-25T19:51:30.510348: step 273, loss 0.2958131730556488, acc 0.8109339407744874, auc: 0.8068035426731079\n",
      "train: 2018-10-25T19:51:30.975100: step 274, loss 0.2081087827682495, acc 0.8489347966429954, auc: 0.879059299067567\n",
      "train: 2018-10-25T19:51:31.340226: step 275, loss 0.30264371633529663, acc 0.8187588152327221, auc: 0.7558699725054137\n",
      "train: 2018-10-25T19:51:31.844747: step 276, loss 0.29329949617385864, acc 0.7492391965916008, auc: 0.8123993411420204\n",
      "train: 2018-10-25T19:51:32.358619: step 277, loss 0.2202821522951126, acc 0.8589830508474576, auc: 0.9120392446712127\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:51:32.612463: step 278, loss 0.2301177978515625, acc 0.8679245283018868, auc: 0.8188494220486766\n",
      "train: 2018-10-25T19:51:33.049740: step 279, loss 0.2548534870147705, acc 0.7962264150943397, auc: 0.8432298845529168\n",
      "train: 2018-10-25T19:51:33.355491: step 280, loss 0.24319614470005035, acc 0.8682678311499272, auc: 0.8465059308922125\n",
      "train: 2018-10-25T19:51:33.704380: step 281, loss 0.2611614763736725, acc 0.8469706433479075, auc: 0.9191857613341741\n",
      "train: 2018-10-25T19:51:34.073176: step 282, loss 0.2593793272972107, acc 0.8600269179004038, auc: 0.8687524233551269\n",
      "train: 2018-10-25T19:51:34.476948: step 283, loss 0.25763657689094543, acc 0.7845898922949461, auc: 0.8160621915337889\n",
      "train: 2018-10-25T19:51:35.000530: step 284, loss 0.24101252853870392, acc 0.8309178743961353, auc: 0.8434717235479396\n",
      "train: 2018-10-25T19:51:35.351074: step 285, loss 0.23806343972682953, acc 0.8378825475599669, auc: 0.8717432889340646\n",
      "train: 2018-10-25T19:51:35.679260: step 286, loss 0.2911692261695862, acc 0.8032454361054767, auc: 0.8559929659086367\n",
      "train: 2018-10-25T19:51:36.133702: step 287, loss 0.19412216544151306, acc 0.8979013045944413, auc: 0.850807991180662\n",
      "train: 2018-10-25T19:51:36.684627: step 288, loss 0.20593534409999847, acc 0.8620296465222349, auc: 0.867892416615149\n",
      "train: 2018-10-25T19:51:37.168943: step 289, loss 0.27132004499435425, acc 0.7875231624459543, auc: 0.8197088714330094\n",
      "train: 2018-10-25T19:51:37.541822: step 290, loss 0.24354051053524017, acc 0.8705953827460511, auc: 0.8575102993850932\n",
      "train: 2018-10-25T19:51:38.004670: step 291, loss 0.24289986491203308, acc 0.8322487346348517, auc: 0.8121755589288056\n",
      "train: 2018-10-25T19:51:38.599755: step 292, loss 0.21709442138671875, acc 0.8502700049091801, auc: 0.9087981241357196\n",
      "train: 2018-10-25T19:51:38.995475: step 293, loss 0.20473675429821014, acc 0.8468468468468469, auc: 0.8687723522538153\n",
      "train: 2018-10-25T19:51:39.364506: step 294, loss 0.2636546790599823, acc 0.8452296819787986, auc: 0.8851351998085667\n",
      "train: 2018-10-25T19:51:39.632856: step 295, loss 0.2675724923610687, acc 0.8426596445029625, auc: 0.8001308550185873\n",
      "train: 2018-10-25T19:51:39.935317: step 296, loss 0.19149938225746155, acc 0.8606770833333334, auc: 0.8610818643343375\n",
      "train: 2018-10-25T19:51:40.847602: step 297, loss 0.19078408181667328, acc 0.8150470219435737, auc: 0.8663068927682753\n",
      "train: 2018-10-25T19:51:41.275177: step 298, loss 0.27297353744506836, acc 0.7971204188481675, auc: 0.8240670318411097\n",
      "train: 2018-10-25T19:51:41.753103: step 299, loss 0.2200329303741455, acc 0.8677354709418837, auc: 0.8330390616263809\n",
      "train: 2018-10-25T19:51:42.403448: step 300, loss 0.20640231668949127, acc 0.8102501525320317, auc: 0.88624405138753\n",
      "\n",
      "Evaluation:\n",
      "dev: 2018-10-25T19:51:44.057823, step: 300, loss: 0.2718260345550684, acc: 0.8142954834667399, auc: 0.7983229379707484\n",
      "Saved model checkpoint to model/my-model-300\n",
      "\n",
      "train: 2018-10-25T19:51:44.506992: step 301, loss 0.23315545916557312, acc 0.8641779189833201, auc: 0.8856690573108483\n",
      "train: 2018-10-25T19:51:44.916175: step 302, loss 0.2365826815366745, acc 0.8537894030851777, auc: 0.8743794802663338\n",
      "train: 2018-10-25T19:51:45.181599: step 303, loss 0.27546605467796326, acc 0.8200620475698035, auc: 0.8095935256918604\n",
      "train: 2018-10-25T19:51:45.839959: step 304, loss 0.21074146032333374, acc 0.7943680419122462, auc: 0.8080493145468395\n",
      "train: 2018-10-25T19:51:46.333813: step 305, loss 0.2051028460264206, acc 0.8348909657320872, auc: 0.8604584344272762\n",
      "train: 2018-10-25T19:51:46.669893: step 306, loss 0.2354871779680252, acc 0.8106343283582089, auc: 0.8708856662162621\n",
      "train: 2018-10-25T19:51:47.066310: step 307, loss 0.25774484872817993, acc 0.7958579881656804, auc: 0.7802554349195153\n",
      "train: 2018-10-25T19:51:47.470041: step 308, loss 0.2606574296951294, acc 0.8161816891412349, auc: 0.8694356040768642\n",
      "train: 2018-10-25T19:51:47.844314: step 309, loss 0.27386394143104553, acc 0.8438050499119202, auc: 0.8682467899539691\n",
      "train: 2018-10-25T19:51:48.185063: step 310, loss 0.25249746441841125, acc 0.8422310756972111, auc: 0.8668170492298399\n",
      "train: 2018-10-25T19:51:48.487087: step 311, loss 0.23398452997207642, acc 0.8779472954230236, auc: 0.9206765715823596\n",
      "train: 2018-10-25T19:51:48.787020: step 312, loss 0.20895561575889587, acc 0.8457502623294858, auc: 0.8746192400577664\n",
      "train: 2018-10-25T19:51:49.239658: step 313, loss 0.20260436832904816, acc 0.8671943711521548, auc: 0.8658831292747524\n",
      "train: 2018-10-25T19:51:49.674560: step 314, loss 0.2122301608324051, acc 0.8514851485148515, auc: 0.8704802684294872\n",
      "train: 2018-10-25T19:51:50.276148: step 315, loss 0.18035009503364563, acc 0.849624060150376, auc: 0.9013825147922658\n",
      "train: 2018-10-25T19:51:50.516455: step 316, loss 0.2633385956287384, acc 0.8615241635687733, auc: 0.756858202038925\n",
      "train: 2018-10-25T19:51:50.822329: step 317, loss 0.2576579451560974, acc 0.7886550777676121, auc: 0.8586581709145428\n",
      "train: 2018-10-25T19:51:51.105137: step 318, loss 0.23372401297092438, acc 0.8506825938566553, auc: 0.8265307029236646\n",
      "train: 2018-10-25T19:51:51.401284: step 319, loss 0.23576103150844574, acc 0.7970149253731343, auc: 0.8419424521777029\n",
      "train: 2018-10-25T19:51:51.860617: step 320, loss 0.22239136695861816, acc 0.8234527687296417, auc: 0.8612444444444445\n",
      "train: 2018-10-25T19:51:52.220274: step 321, loss 0.2105947881937027, acc 0.9105829088851161, auc: 0.8068950761888876\n",
      "train: 2018-10-25T19:51:52.673770: step 322, loss 0.27163663506507874, acc 0.8059360730593608, auc: 0.8485741754867455\n",
      "train: 2018-10-25T19:51:53.062623: step 323, loss 0.22637787461280823, acc 0.8674033149171271, auc: 0.8917764974886637\n",
      "train: 2018-10-25T19:51:53.410250: step 324, loss 0.31032633781433105, acc 0.8103448275862069, auc: 0.8291239590006406\n",
      "train: 2018-10-25T19:51:53.827886: step 325, loss 0.2289993315935135, acc 0.8339100346020761, auc: 0.8583939106616099\n",
      "train: 2018-10-25T19:51:54.525211: step 326, loss 0.23211441934108734, acc 0.7972429638139, auc: 0.8159493521268265\n",
      "train: 2018-10-25T19:51:54.800652: step 327, loss 0.259733647108078, acc 0.8443526170798898, auc: 0.8680698867206291\n",
      "train: 2018-10-25T19:51:55.200216: step 328, loss 0.1892004758119583, acc 0.8693284936479129, auc: 0.883702104695089\n",
      "train: 2018-10-25T19:51:55.640472: step 329, loss 0.23976458609104156, acc 0.8292540792540792, auc: 0.8436093628650244\n",
      "train: 2018-10-25T19:51:55.991252: step 330, loss 0.21368972957134247, acc 0.8742038216560509, auc: 0.8875730632294423\n",
      "train: 2018-10-25T19:51:56.388157: step 331, loss 0.2650177478790283, acc 0.7753753753753754, auc: 0.8428253601530459\n",
      "train: 2018-10-25T19:51:56.800314: step 332, loss 0.1879880726337433, acc 0.8504065040650407, auc: 0.8533432536923207\n",
      "train: 2018-10-25T19:51:57.054418: step 333, loss 0.22240261733531952, acc 0.8560687432867884, auc: 0.8753388563755669\n",
      "train: 2018-10-25T19:51:57.672906: step 334, loss 0.20468495786190033, acc 0.8485607008760951, auc: 0.9189372615483399\n",
      "train: 2018-10-25T19:51:58.064761: step 335, loss 0.20950651168823242, acc 0.8688046647230321, auc: 0.9208621918233798\n",
      "train: 2018-10-25T19:51:58.412860: step 336, loss 0.22951115667819977, acc 0.8384353741496599, auc: 0.8867219566576018\n",
      "train: 2018-10-25T19:51:58.903166: step 337, loss 0.23642398416996002, acc 0.8346839546191248, auc: 0.8384881773081632\n",
      "train: 2018-10-25T19:51:59.293077: step 338, loss 0.2605466842651367, acc 0.8352444176222088, auc: 0.8611003212071411\n",
      "train: 2018-10-25T19:51:59.747986: step 339, loss 0.24630901217460632, acc 0.8346545866364666, auc: 0.8384275081643503\n",
      "train: 2018-10-25T19:51:59.994601: step 340, loss 0.36955663561820984, acc 0.7450424929178471, auc: 0.8063915318944521\n",
      "train: 2018-10-25T19:52:00.331157: step 341, loss 0.2740767002105713, acc 0.8214804063860668, auc: 0.8045177728063634\n",
      "train: 2018-10-25T19:52:00.982569: step 342, loss 0.1609194427728653, acc 0.8848405985686402, auc: 0.8801619793520825\n",
      "train: 2018-10-25T19:52:01.415239: step 343, loss 0.21781036257743835, acc 0.8264352469959947, auc: 0.8575550359032502\n",
      "train: 2018-10-25T19:52:01.915820: step 344, loss 0.2763158977031708, acc 0.7782299084435402, auc: 0.8212080217808444\n",
      "train: 2018-10-25T19:52:02.220437: step 345, loss 0.2040078490972519, acc 0.8882591093117409, auc: 0.8702043271490065\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:52:02.520542: step 346, loss 0.2635197937488556, acc 0.8088709677419355, auc: 0.8901892114604463\n",
      "train: 2018-10-25T19:52:03.046078: step 347, loss 0.24247868359088898, acc 0.8035043804755945, auc: 0.8139026088704467\n",
      "train: 2018-10-25T19:52:03.500190: step 348, loss 0.22843673825263977, acc 0.8080568720379147, auc: 0.8392861853772354\n",
      "train: 2018-10-25T19:52:03.970057: step 349, loss 0.1893603652715683, acc 0.7772073921971252, auc: 0.8083611226402944\n",
      "train: 2018-10-25T19:52:04.259669: step 350, loss 0.20402057468891144, acc 0.8924930491195552, auc: 0.8565490196078431\n",
      "train: 2018-10-25T19:52:04.704813: step 351, loss 0.21792644262313843, acc 0.8020833333333334, auc: 0.8314630019048597\n",
      "train: 2018-10-25T19:52:05.026167: step 352, loss 0.20304803550243378, acc 0.8672340425531915, auc: 0.8764926157314533\n",
      "train: 2018-10-25T19:52:05.292282: step 353, loss 0.18031078577041626, acc 0.8653624856156502, auc: 0.9139629021708275\n",
      "train: 2018-10-25T19:52:05.767604: step 354, loss 0.19653339684009552, acc 0.7983193277310925, auc: 0.844840462200046\n",
      "train: 2018-10-25T19:52:06.146227: step 355, loss 0.27492034435272217, acc 0.7334801762114538, auc: 0.8185486811717515\n",
      "train: 2018-10-25T19:52:06.391198: step 356, loss 0.28020960092544556, acc 0.7968369829683698, auc: 0.8481593345879013\n",
      "train: 2018-10-25T19:52:06.826883: step 357, loss 0.23419524729251862, acc 0.8363095238095238, auc: 0.8458946087568557\n",
      "train: 2018-10-25T19:52:07.058705: step 358, loss 0.27735042572021484, acc 0.8355481727574751, auc: 0.8853419613067689\n",
      "train: 2018-10-25T19:52:07.610210: step 359, loss 0.17135615646839142, acc 0.8793800539083558, auc: 0.9213830195626798\n",
      "train: 2018-10-25T19:52:08.082517: step 360, loss 0.1756763756275177, acc 0.8908045977011494, auc: 0.9330420420348642\n",
      "train: 2018-10-25T19:52:08.431741: step 361, loss 0.25457310676574707, acc 0.8273480662983426, auc: 0.8167213719578132\n",
      "train: 2018-10-25T19:52:09.366280: step 362, loss 0.23948261141777039, acc 0.7554935472619463, auc: 0.8194899581454202\n",
      "train: 2018-10-25T19:52:09.708849: step 363, loss 0.27414676547050476, acc 0.7964118564742589, auc: 0.7853794556560736\n",
      "train: 2018-10-25T19:52:10.127277: step 364, loss 0.2089766263961792, acc 0.8549905838041432, auc: 0.8979715666163222\n",
      "train: 2018-10-25T19:52:10.420244: step 365, loss 0.22657692432403564, acc 0.8937809576224546, auc: 0.8433635095037304\n",
      "train: 2018-10-25T19:52:10.919740: step 366, loss 0.2157592624425888, acc 0.8425155925155925, auc: 0.8380655418808381\n",
      "train: 2018-10-25T19:52:11.222570: step 367, loss 0.22829000651836395, acc 0.8453781512605042, auc: 0.7909266547406082\n",
      "train: 2018-10-25T19:52:11.666158: step 368, loss 0.21189627051353455, acc 0.8579804560260587, auc: 0.8464967021460591\n",
      "train: 2018-10-25T19:52:12.126108: step 369, loss 0.2194042056798935, acc 0.8014184397163121, auc: 0.8534522439585731\n",
      "train: 2018-10-25T19:52:12.580685: step 370, loss 0.2051745504140854, acc 0.8335949764521193, auc: 0.8212403538490494\n",
      "train: 2018-10-25T19:52:12.901984: step 371, loss 0.16657406091690063, acc 0.8785471055618616, auc: 0.880392647242716\n",
      "train: 2018-10-25T19:52:13.250305: step 372, loss 0.21349358558654785, acc 0.8122895622895623, auc: 0.9040508000036165\n",
      "train: 2018-10-25T19:52:13.538735: step 373, loss 0.22637178003787994, acc 0.845643153526971, auc: 0.8242154582532781\n",
      "train: 2018-10-25T19:52:13.946901: step 374, loss 0.19577381014823914, acc 0.8626320845341018, auc: 0.910311037154695\n",
      "train: 2018-10-25T19:52:14.420618: step 375, loss 0.25415363907814026, acc 0.8133478024959305, auc: 0.8360296203400965\n",
      "train: 2018-10-25T19:52:14.827957: step 376, loss 0.2433280050754547, acc 0.8584447144592953, auc: 0.8542177914110429\n",
      "train: 2018-10-25T19:52:15.144480: step 377, loss 0.2042422592639923, acc 0.8528481012658228, auc: 0.8624377764421224\n",
      "train: 2018-10-25T19:52:15.596354: step 378, loss 0.2429962158203125, acc 0.807185628742515, auc: 0.8217352142005098\n",
      "train: 2018-10-25T19:52:15.899287: step 379, loss 0.23757816851139069, acc 0.8251928020565553, auc: 0.8889331084453036\n",
      "train: 2018-10-25T19:52:16.239890: step 380, loss 0.19897247850894928, acc 0.8753943217665615, auc: 0.87060759459617\n",
      "train: 2018-10-25T19:52:16.560936: step 381, loss 0.22426417469978333, acc 0.8289703315881326, auc: 0.9122419513393955\n",
      "train: 2018-10-25T19:52:17.114484: step 382, loss 0.1881488859653473, acc 0.7908163265306123, auc: 0.8394397431362411\n",
      "train: 2018-10-25T19:52:17.959599: step 383, loss 0.18366111814975739, acc 0.804, auc: 0.8541420659019342\n",
      "train: 2018-10-25T19:52:18.259811: step 384, loss 0.24046145379543304, acc 0.8415124698310539, auc: 0.8047077646602506\n",
      "train: 2018-10-25T19:52:18.772782: step 385, loss 0.1729675829410553, acc 0.8215102974828375, auc: 0.8642222222222222\n",
      "train: 2018-10-25T19:52:19.087891: step 386, loss 0.2168029248714447, acc 0.8724528301886793, auc: 0.8301692589204026\n",
      "train: 2018-10-25T19:52:19.413639: step 387, loss 0.22176459431648254, acc 0.85546875, auc: 0.9039050250130666\n",
      "train: 2018-10-25T19:52:19.913224: step 388, loss 0.1969241201877594, acc 0.8726851851851852, auc: 0.8717193535639434\n",
      "train: 2018-10-25T19:52:20.311295: step 389, loss 0.18391886353492737, acc 0.8899388549193996, auc: 0.8765850687658507\n",
      "train: 2018-10-25T19:52:20.672734: step 390, loss 0.19894184172153473, acc 0.8451882845188284, auc: 0.898462913065937\n",
      "train: 2018-10-25T19:52:21.083228: step 391, loss 0.1617840826511383, acc 0.8586171310629515, auc: 0.869573890839451\n",
      "train: 2018-10-25T19:52:21.433732: step 392, loss 0.2500593960285187, acc 0.8368745716244003, auc: 0.8501643392047824\n",
      "train: 2018-10-25T19:52:22.088002: step 393, loss 0.17797043919563293, acc 0.85728592889334, auc: 0.8717257895524622\n",
      "train: 2018-10-25T19:52:22.442560: step 394, loss 0.24145840108394623, acc 0.856071964017991, auc: 0.7758340218963025\n",
      "train: 2018-10-25T19:52:22.744124: step 395, loss 0.21081100404262543, acc 0.8747203579418344, auc: 0.898099786414985\n",
      "train: 2018-10-25T19:52:23.082190: step 396, loss 0.1864376962184906, acc 0.8734283319362951, auc: 0.901065510858278\n",
      "train: 2018-10-25T19:52:23.442787: step 397, loss 0.19564788043498993, acc 0.8583473861720068, auc: 0.847913503939854\n",
      "train: 2018-10-25T19:52:24.058939: step 398, loss 0.17587557435035706, acc 0.8650519031141869, auc: 0.9266250984656009\n",
      "train: 2018-10-25T19:52:24.282377: step 399, loss 0.2201097309589386, acc 0.8600311041990669, auc: 0.8776722090261284\n",
      "train: 2018-10-25T19:52:24.663458: step 400, loss 0.21218308806419373, acc 0.8539985326485693, auc: 0.8481576848735519\n",
      "\n",
      "Evaluation:\n",
      "dev: 2018-10-25T19:52:26.247626, step: 400, loss: 0.24981679595433748, acc: 0.8183212290528468, auc: 0.8076250869978913\n",
      "Saved model checkpoint to model/my-model-400\n",
      "\n",
      "train: 2018-10-25T19:52:26.680027: step 401, loss 0.229148730635643, acc 0.8410041841004184, auc: 0.9088790455458874\n",
      "train: 2018-10-25T19:52:26.948272: step 402, loss 0.18805617094039917, acc 0.9211237169097785, auc: 0.8690914158305463\n",
      "train: 2018-10-25T19:52:27.430435: step 403, loss 0.17328576743602753, acc 0.8758888170652877, auc: 0.8568961330662297\n",
      "train: 2018-10-25T19:52:27.916670: step 404, loss 0.21770834922790527, acc 0.8212034383954154, auc: 0.8688379221467457\n",
      "train: 2018-10-25T19:52:28.314692: step 405, loss 0.17459595203399658, acc 0.8936363636363637, auc: 0.8995551088028889\n",
      "train: 2018-10-25T19:52:28.786413: step 406, loss 0.21628542244434357, acc 0.8466386554621849, auc: 0.8277112765968535\n",
      "train: 2018-10-25T19:52:29.335403: step 407, loss 0.25270992517471313, acc 0.7993527508090615, auc: 0.8614412363595414\n",
      "train: 2018-10-25T19:52:29.674985: step 408, loss 0.20235441625118256, acc 0.8159879336349924, auc: 0.797637695271157\n",
      "train: 2018-10-25T19:52:30.034306: step 409, loss 0.19482974708080292, acc 0.8075072184793071, auc: 0.8162543314555626\n",
      "train: 2018-10-25T19:52:30.485326: step 410, loss 0.1624392718076706, acc 0.8424304840370752, auc: 0.8730960195236893\n",
      "train: 2018-10-25T19:52:30.747742: step 411, loss 0.25108054280281067, acc 0.8621718377088305, auc: 0.8839775219298246\n",
      "train: 2018-10-25T19:52:31.069577: step 412, loss 0.2550065219402313, acc 0.7791164658634538, auc: 0.8620765319775221\n",
      "train: 2018-10-25T19:52:31.572810: step 413, loss 0.18273138999938965, acc 0.8019933554817276, auc: 0.8384204876686505\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:52:31.999055: step 414, loss 0.23766455054283142, acc 0.8089960886571056, auc: 0.8093991447999946\n",
      "train: 2018-10-25T19:52:32.303016: step 415, loss 0.23265264928340912, acc 0.8003646308113036, auc: 0.8516467270163308\n",
      "train: 2018-10-25T19:52:32.767683: step 416, loss 0.21114066243171692, acc 0.8112582781456954, auc: 0.8228066501112009\n",
      "train: 2018-10-25T19:52:33.223512: step 417, loss 0.24549464881420135, acc 0.8366902558519325, auc: 0.8590149400960211\n",
      "train: 2018-10-25T19:52:33.463194: step 418, loss 0.1419600546360016, acc 0.906318082788671, auc: 0.915439706862092\n",
      "train: 2018-10-25T19:52:33.755306: step 419, loss 0.1529112607240677, acc 0.8898104265402843, auc: 0.9141822953526776\n",
      "train: 2018-10-25T19:52:34.028518: step 420, loss 0.24219314754009247, acc 0.815946348733234, auc: 0.8634659077508471\n",
      "train: 2018-10-25T19:52:34.477770: step 421, loss 0.2501632869243622, acc 0.8048538880633977, auc: 0.8385261150280613\n",
      "train: 2018-10-25T19:52:34.674742: step 422, loss 0.18501000106334686, acc 0.8693982074263764, auc: 0.8886680273316178\n",
      "train: 2018-10-25T19:52:34.998346: step 423, loss 0.22782233357429504, acc 0.8183161004431314, auc: 0.8358549036465137\n",
      "train: 2018-10-25T19:52:35.470910: step 424, loss 0.24771524965763092, acc 0.8117964880684376, auc: 0.8725819349167534\n",
      "train: 2018-10-25T19:52:35.771611: step 425, loss 0.2248845249414444, acc 0.829205807002562, auc: 0.8728851963746224\n",
      "train: 2018-10-25T19:52:36.079622: step 426, loss 0.17799042165279388, acc 0.8903225806451613, auc: 0.7749250916391623\n",
      "train: 2018-10-25T19:52:36.419776: step 427, loss 0.19760653376579285, acc 0.8986083499005965, auc: 0.8371369452432029\n",
      "train: 2018-10-25T19:52:36.926937: step 428, loss 0.1974187046289444, acc 0.8322147651006712, auc: 0.8889226637146461\n",
      "train: 2018-10-25T19:52:37.431294: step 429, loss 0.24462875723838806, acc 0.7733812949640287, auc: 0.8135810398956564\n",
      "train: 2018-10-25T19:52:37.790836: step 430, loss 0.2258453667163849, acc 0.8184991273996509, auc: 0.8846162462834409\n",
      "train: 2018-10-25T19:52:38.220873: step 431, loss 0.17976653575897217, acc 0.8663793103448276, auc: 0.9100459725459724\n",
      "train: 2018-10-25T19:52:38.848020: step 432, loss 0.17611581087112427, acc 0.8230814294083187, auc: 0.886241979061128\n",
      "train: 2018-10-25T19:52:39.166414: step 433, loss 0.24362494051456451, acc 0.8512696493349455, auc: 0.8953098783047174\n",
      "train: 2018-10-25T19:52:39.616959: step 434, loss 0.21920302510261536, acc 0.8566519391261659, auc: 0.8968132440300097\n",
      "train: 2018-10-25T19:52:40.213841: step 435, loss 0.1957656443119049, acc 0.7975391498881432, auc: 0.8255776272482364\n",
      "train: 2018-10-25T19:52:40.676290: step 436, loss 0.16348689794540405, acc 0.8529199711607787, auc: 0.8591688462656204\n",
      "train: 2018-10-25T19:52:41.000839: step 437, loss 0.21536020934581757, acc 0.8792650918635171, auc: 0.834077995650048\n",
      "train: 2018-10-25T19:52:41.374491: step 438, loss 0.19215808808803558, acc 0.8320926385442514, auc: 0.8555514797486226\n",
      "train: 2018-10-25T19:52:41.696092: step 439, loss 0.15929313004016876, acc 0.8985868661679135, auc: 0.8901510337748668\n",
      "train: 2018-10-25T19:52:42.036125: step 440, loss 0.2521710693836212, acc 0.7702589807852965, auc: 0.8682046609387839\n",
      "train: 2018-10-25T19:52:42.452107: step 441, loss 0.20488320291042328, acc 0.8096280087527352, auc: 0.8360432975442722\n",
      "train: 2018-10-25T19:52:42.790730: step 442, loss 0.19956527650356293, acc 0.8660990712074303, auc: 0.8932156567752645\n",
      "train: 2018-10-25T19:52:43.246788: step 443, loss 0.18939876556396484, acc 0.8395437262357415, auc: 0.8386746147425594\n",
      "train: 2018-10-25T19:52:43.670201: step 444, loss 0.20315031707286835, acc 0.8588158750813273, auc: 0.9008812434384953\n",
      "train: 2018-10-25T19:52:44.342395: step 445, loss 0.16755089163780212, acc 0.8223261250730567, auc: 0.9011178248566555\n",
      "train: 2018-10-25T19:52:45.108755: step 446, loss 0.15895190834999084, acc 0.8251626898047723, auc: 0.9057992251333\n",
      "train: 2018-10-25T19:52:45.558680: step 447, loss 0.19286037981510162, acc 0.824390243902439, auc: 0.8356045152552853\n",
      "train: 2018-10-25T19:52:46.011663: step 448, loss 0.1664188951253891, acc 0.8481308411214953, auc: 0.849147432572614\n",
      "train: 2018-10-25T19:52:46.358799: step 449, loss 0.18428058922290802, acc 0.8867924528301887, auc: 0.9009663072163072\n",
      "train: 2018-10-25T19:52:46.659277: step 450, loss 0.15516029298305511, acc 0.8581460674157303, auc: 0.8810376789837024\n",
      "train: 2018-10-25T19:52:47.179155: step 451, loss 0.18385283648967743, acc 0.8138825324180016, auc: 0.7995282035396649\n",
      "train: 2018-10-25T19:52:47.730197: step 452, loss 0.16337339580059052, acc 0.827073552425665, auc: 0.8440472337035008\n",
      "train: 2018-10-25T19:52:48.194581: step 453, loss 0.18396644294261932, acc 0.8140845070422535, auc: 0.8612259605822575\n",
      "train: 2018-10-25T19:52:48.539605: step 454, loss 0.13517318665981293, acc 0.9386996904024768, auc: 0.9605540166204986\n",
      "train: 2018-10-25T19:52:48.904202: step 455, loss 0.27334290742874146, acc 0.8196319018404908, auc: 0.8342343159068916\n",
      "train: 2018-10-25T19:52:49.300004: step 456, loss 0.21345193684101105, acc 0.8796296296296297, auc: 0.9115384336238869\n",
      "train: 2018-10-25T19:52:49.640498: step 457, loss 0.25568631291389465, acc 0.7968185104844541, auc: 0.8551462465779742\n",
      "train: 2018-10-25T19:52:49.967442: step 458, loss 0.19544397294521332, acc 0.8772619984264359, auc: 0.841214828722058\n",
      "train: 2018-10-25T19:52:50.588680: step 459, loss 0.19084987044334412, acc 0.8497292418772563, auc: 0.8799140458422173\n",
      "train: 2018-10-25T19:52:51.094938: step 460, loss 0.17526626586914062, acc 0.8246597277822257, auc: 0.8806259707200008\n",
      "train: 2018-10-25T19:52:51.752214: step 461, loss 0.16468119621276855, acc 0.8283723875870804, auc: 0.8772851750425579\n",
      "train: 2018-10-25T19:52:52.231409: step 462, loss 0.16654570400714874, acc 0.8436744560838034, auc: 0.881491138773299\n",
      "train: 2018-10-25T19:52:52.580588: step 463, loss 0.21418531239032745, acc 0.8492791612057667, auc: 0.9142998319223108\n",
      "train: 2018-10-25T19:52:52.883249: step 464, loss 0.21080951392650604, acc 0.8647540983606558, auc: 0.8881758709295795\n",
      "train: 2018-10-25T19:52:53.167588: step 465, loss 0.2523636519908905, acc 0.8224431818181818, auc: 0.8293147979982292\n",
      "train: 2018-10-25T19:52:53.557015: step 466, loss 0.17867261171340942, acc 0.8622147083685545, auc: 0.8937119917325809\n",
      "train: 2018-10-25T19:52:54.120028: step 467, loss 0.1579952985048294, acc 0.8540609137055838, auc: 0.8782992776589622\n",
      "train: 2018-10-25T19:52:54.406965: step 468, loss 0.18526875972747803, acc 0.8594339622641509, auc: 0.8523953488372094\n",
      "train: 2018-10-25T19:52:54.824263: step 469, loss 0.2251245528459549, acc 0.8268041237113402, auc: 0.8365215073604789\n",
      "train: 2018-10-25T19:52:55.305411: step 470, loss 0.12063727527856827, acc 0.9140401146131805, auc: 0.9602284559634635\n",
      "train: 2018-10-25T19:52:55.587352: step 471, loss 0.2067727893590927, acc 0.8812307692307693, auc: 0.8974750727720275\n",
      "train: 2018-10-25T19:52:55.893259: step 472, loss 0.16830262541770935, acc 0.8253333333333334, auc: 0.9019725044829647\n",
      "train: 2018-10-25T19:52:56.290104: step 473, loss 0.1812310367822647, acc 0.8347676419965576, auc: 0.884169423827834\n",
      "train: 2018-10-25T19:52:56.790320: step 474, loss 0.20574116706848145, acc 0.8335435056746532, auc: 0.8780889838829057\n",
      "train: 2018-10-25T19:52:57.165418: step 475, loss 0.20090946555137634, acc 0.864406779661017, auc: 0.9000554304521046\n",
      "train: 2018-10-25T19:52:57.419057: step 476, loss 0.22042688727378845, acc 0.8101851851851852, auc: 0.8172331078597163\n",
      "train: 2018-10-25T19:52:57.691802: step 477, loss 0.21185733377933502, acc 0.8484362469927826, auc: 0.850608772627362\n",
      "train: 2018-10-25T19:52:58.122207: step 478, loss 0.22637054324150085, acc 0.8328509406657019, auc: 0.8737974404236539\n",
      "train: 2018-10-25T19:52:58.459193: step 479, loss 0.17659541964530945, acc 0.7937743190661478, auc: 0.8586675600650905\n",
      "train: 2018-10-25T19:52:58.910709: step 480, loss 0.20173770189285278, acc 0.8532311062431545, auc: 0.897417778409514\n",
      "train: 2018-10-25T19:52:59.202286: step 481, loss 0.242875874042511, acc 0.8182640144665461, auc: 0.7998441312008675\n",
      "train: 2018-10-25T19:52:59.538275: step 482, loss 0.1764775663614273, acc 0.9076693968726731, auc: 0.8816085650440775\n",
      "train: 2018-10-25T19:52:59.848251: step 483, loss 0.16684313118457794, acc 0.867005076142132, auc: 0.8695748133722817\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:53:00.212601: step 484, loss 0.2342788577079773, acc 0.8620689655172413, auc: 0.8789074406836382\n",
      "train: 2018-10-25T19:53:00.565961: step 485, loss 0.20330120623111725, acc 0.8044280442804428, auc: 0.8374578984730172\n",
      "train: 2018-10-25T19:53:01.030761: step 486, loss 0.16138909757137299, acc 0.8424, auc: 0.8880917348286488\n",
      "train: 2018-10-25T19:53:01.310526: step 487, loss 0.1837901622056961, acc 0.8390446521287642, auc: 0.9060304699424542\n",
      "train: 2018-10-25T19:53:01.827921: step 488, loss 0.23207269608974457, acc 0.7957941339236303, auc: 0.835271909531806\n",
      "train: 2018-10-25T19:53:02.230025: step 489, loss 0.18221166729927063, acc 0.8396880415944541, auc: 0.8493218134522482\n",
      "train: 2018-10-25T19:53:02.554461: step 490, loss 0.16567261517047882, acc 0.887719298245614, auc: 0.8879798868636077\n",
      "train: 2018-10-25T19:53:02.736290: step 491, loss 0.17341169714927673, acc 0.8603351955307262, auc: 0.8881711812790511\n",
      "train: 2018-10-25T19:53:03.181404: step 492, loss 0.2008458822965622, acc 0.8114241001564946, auc: 0.8486022043938812\n",
      "train: 2018-10-25T19:53:03.440726: step 493, loss 0.19003666937351227, acc 0.8639344262295082, auc: 0.8799502780920812\n",
      "train: 2018-10-25T19:53:03.730300: step 494, loss 0.21790242195129395, acc 0.8620689655172413, auc: 0.8552693755541751\n",
      "train: 2018-10-25T19:53:04.043728: step 495, loss 0.20811717212200165, acc 0.8344709897610921, auc: 0.843335290663535\n",
      "train: 2018-10-25T19:53:04.347831: step 496, loss 0.16912056505680084, acc 0.893296853625171, auc: 0.8853286754133717\n",
      "train: 2018-10-25T19:53:04.627851: step 497, loss 0.20327691733837128, acc 0.8647450110864745, auc: 0.887059982723248\n",
      "train: 2018-10-25T19:53:04.998652: step 498, loss 0.21514196693897247, acc 0.8204941860465116, auc: 0.8904698653568663\n",
      "train: 2018-10-25T19:53:05.496682: step 499, loss 0.19438843429088593, acc 0.8566271273852502, auc: 0.8750109748809756\n",
      "train: 2018-10-25T19:53:05.915893: step 500, loss 0.20767664909362793, acc 0.8537220197813639, auc: 0.8911118104572224\n",
      "\n",
      "Evaluation:\n",
      "dev: 2018-10-25T19:53:07.571845, step: 500, loss: 0.23799424446546114, acc: 0.8203982237489652, auc: 0.8113552353040296\n",
      "Saved model checkpoint to model/my-model-500\n",
      "\n",
      "train: 2018-10-25T19:53:08.066322: step 501, loss 0.20768824219703674, acc 0.8329156223893066, auc: 0.8613036749091172\n",
      "train: 2018-10-25T19:53:08.370674: step 502, loss 0.1789974570274353, acc 0.8199320498301246, auc: 0.8846115767936109\n",
      "train: 2018-10-25T19:53:08.676658: step 503, loss 0.25052449107170105, acc 0.8453747467927076, auc: 0.846474382612741\n",
      "train: 2018-10-25T19:53:08.995061: step 504, loss 0.19096840918064117, acc 0.8508997429305912, auc: 0.8853280222101515\n",
      "train: 2018-10-25T19:53:09.293391: step 505, loss 0.2299506813287735, acc 0.85995085995086, auc: 0.8611069418386492\n",
      "train: 2018-10-25T19:53:09.789033: step 506, loss 0.19656987488269806, acc 0.8348951360999554, auc: 0.8900189971642987\n",
      "train: 2018-10-25T19:53:10.086736: step 507, loss 0.13243767619132996, acc 0.8778173190984578, auc: 0.9295601278205295\n",
      "train: 2018-10-25T19:53:10.417316: step 508, loss 0.22655825316905975, acc 0.8019720624486442, auc: 0.8797729343414622\n",
      "train: 2018-10-25T19:53:10.850328: step 509, loss 0.1695839911699295, acc 0.8654791154791155, auc: 0.9222425886550822\n",
      "train: 2018-10-25T19:53:11.189885: step 510, loss 0.19226518273353577, acc 0.8736089030206677, auc: 0.8758074991257296\n",
      "train: 2018-10-25T19:53:11.494913: step 511, loss 0.19058823585510254, acc 0.8387609213661636, auc: 0.9052414948641363\n",
      "train: 2018-10-25T19:53:12.027270: step 512, loss 0.20433874428272247, acc 0.7874472573839663, auc: 0.8569377422245251\n",
      "train: 2018-10-25T19:53:12.365720: step 513, loss 0.1534632295370102, acc 0.8662790697674418, auc: 0.819104320337197\n",
      "train: 2018-10-25T19:53:12.838950: step 514, loss 0.1629014015197754, acc 0.8783783783783784, auc: 0.9146094737491137\n",
      "train: 2018-10-25T19:53:13.252944: step 515, loss 0.1804298609495163, acc 0.8498985801217038, auc: 0.8694422194899213\n",
      "train: 2018-10-25T19:53:13.851346: step 516, loss 0.17192737758159637, acc 0.8291139240506329, auc: 0.8953593750000001\n",
      "train: 2018-10-25T19:53:14.222629: step 517, loss 0.18801037967205048, acc 0.84375, auc: 0.8975474214064801\n",
      "train: 2018-10-25T19:53:14.469278: step 518, loss 0.17305488884449005, acc 0.8810356892932121, auc: 0.8905827483543196\n",
      "train: 2018-10-25T19:53:15.417022: step 519, loss 0.13310612738132477, acc 0.8355688027819176, auc: 0.9050897722039962\n",
      "train: 2018-10-25T19:53:15.744301: step 520, loss 0.2247554063796997, acc 0.8647917793401839, auc: 0.8584162041319765\n",
      "train: 2018-10-25T19:53:16.064550: step 521, loss 0.19280733168125153, acc 0.8401913875598086, auc: 0.8964330102861674\n",
      "train: 2018-10-25T19:53:16.388473: step 522, loss 0.20636726915836334, acc 0.8838582677165354, auc: 0.8338416780066633\n",
      "train: 2018-10-25T19:53:17.001762: step 523, loss 0.2184576839208603, acc 0.8102587380844303, auc: 0.8432655746596311\n",
      "train: 2018-10-25T19:53:17.378713: step 524, loss 0.1724507361650467, acc 0.8776194467728415, auc: 0.8702298984712336\n",
      "train: 2018-10-25T19:53:18.039854: step 525, loss 0.12517446279525757, acc 0.9086932750136687, auc: 0.9669992088216388\n",
      "train: 2018-10-25T19:53:18.455141: step 526, loss 0.24323701858520508, acc 0.8021300448430493, auc: 0.857754255624473\n",
      "train: 2018-10-25T19:53:18.920140: step 527, loss 0.18671651184558868, acc 0.8321377331420373, auc: 0.8619179461364208\n",
      "train: 2018-10-25T19:53:19.231582: step 528, loss 0.18669213354587555, acc 0.8412984670874661, auc: 0.8615411512740941\n",
      "train: 2018-10-25T19:53:19.610122: step 529, loss 0.1784684807062149, acc 0.868020304568528, auc: 0.8557380405504833\n",
      "train: 2018-10-25T19:53:20.008444: step 530, loss 0.16535243391990662, acc 0.8806497175141242, auc: 0.9136116453250956\n",
      "train: 2018-10-25T19:53:20.492146: step 531, loss 0.2389180064201355, acc 0.8385627530364372, auc: 0.8639843017357038\n",
      "train: 2018-10-25T19:53:20.859713: step 532, loss 0.2184869796037674, acc 0.8466076696165191, auc: 0.8571071681189519\n",
      "train: 2018-10-25T19:53:21.220401: step 533, loss 0.17230823636054993, acc 0.9235511713933415, auc: 0.8533730657379816\n",
      "train: 2018-10-25T19:53:21.674786: step 534, loss 0.1854362189769745, acc 0.8475073313782991, auc: 0.8846178829546464\n",
      "train: 2018-10-25T19:53:22.291860: step 535, loss 0.19226999580860138, acc 0.8023797206414899, auc: 0.8690284804381538\n",
      "train: 2018-10-25T19:53:22.563488: step 536, loss 0.19313541054725647, acc 0.84375, auc: 0.8822897871311894\n",
      "train: 2018-10-25T19:53:22.807708: step 537, loss 0.15457645058631897, acc 0.893048128342246, auc: 0.9049614506341214\n",
      "train: 2018-10-25T19:53:23.079887: step 538, loss 0.13335517048835754, acc 0.8879518072289156, auc: 0.9392702368054481\n",
      "train: 2018-10-25T19:53:23.388192: step 539, loss 0.17199262976646423, acc 0.7902330743618202, auc: 0.8359425962165687\n",
      "train: 2018-10-25T19:53:23.680267: step 540, loss 0.21019403636455536, acc 0.8536931818181818, auc: 0.8453613750400343\n",
      "train: 2018-10-25T19:53:24.022340: step 541, loss 0.13992103934288025, acc 0.9160892994611239, auc: 0.9211904278854979\n",
      "train: 2018-10-25T19:53:24.309472: step 542, loss 0.2076990157365799, acc 0.86693822131704, auc: 0.8585475702512286\n",
      "train: 2018-10-25T19:53:24.682373: step 543, loss 0.15877163410186768, acc 0.8836206896551724, auc: 0.9489015271803458\n",
      "train: 2018-10-25T19:53:25.062653: step 544, loss 0.148208886384964, acc 0.8890995260663507, auc: 0.8948488560575126\n",
      "train: 2018-10-25T19:53:25.528165: step 545, loss 0.14616644382476807, acc 0.8977900552486188, auc: 0.905752172386461\n",
      "train: 2018-10-25T19:53:25.809769: step 546, loss 0.21774892508983612, acc 0.8379013312451057, auc: 0.901111074460131\n",
      "train: 2018-10-25T19:53:26.075346: step 547, loss 0.18263910710811615, acc 0.9084924292297564, auc: 0.9339696005655708\n",
      "train: 2018-10-25T19:53:26.712607: step 548, loss 0.1819593608379364, acc 0.8172514619883041, auc: 0.8845415233714161\n",
      "train: 2018-10-25T19:53:27.066426: step 549, loss 0.14315257966518402, acc 0.9016881827209533, auc: 0.8957048533115514\n",
      "train: 2018-10-25T19:53:27.570766: step 550, loss 0.1839948296546936, acc 0.8495518565941101, auc: 0.8992878902079015\n",
      "train: 2018-10-25T19:53:27.940417: step 551, loss 0.18442511558532715, acc 0.8728943338437979, auc: 0.8788342479245814\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:53:28.347574: step 552, loss 0.1839083433151245, acc 0.8631940469376074, auc: 0.9302833055521929\n",
      "train: 2018-10-25T19:53:28.843109: step 553, loss 0.13273639976978302, acc 0.8768844221105527, auc: 0.8533134216912451\n",
      "train: 2018-10-25T19:53:29.557541: step 554, loss 0.18769443035125732, acc 0.800383877159309, auc: 0.884826198826954\n",
      "train: 2018-10-25T19:53:30.075305: step 555, loss 0.14059986174106598, acc 0.8707539353769677, auc: 0.8767720912401764\n",
      "train: 2018-10-25T19:53:30.557814: step 556, loss 0.18032091856002808, acc 0.7922178988326848, auc: 0.8464426877470355\n",
      "train: 2018-10-25T19:53:30.984436: step 557, loss 0.21715065836906433, acc 0.7998866213151927, auc: 0.8644112406867508\n",
      "train: 2018-10-25T19:53:31.490231: step 558, loss 0.21283672749996185, acc 0.7814530419373893, auc: 0.849125161227072\n",
      "train: 2018-10-25T19:53:31.769529: step 559, loss 0.17430315911769867, acc 0.857824427480916, auc: 0.8941339136163915\n",
      "train: 2018-10-25T19:53:32.135229: step 560, loss 0.17363537847995758, acc 0.8598665395614872, auc: 0.8388815337220256\n",
      "train: 2018-10-25T19:53:32.455878: step 561, loss 0.19512169063091278, acc 0.8439407149084568, auc: 0.8679239931934204\n",
      "train: 2018-10-25T19:53:32.902245: step 562, loss 0.17952458560466766, acc 0.8161499639509733, auc: 0.8687353629976581\n",
      "train: 2018-10-25T19:53:33.317760: step 563, loss 0.1839476227760315, acc 0.8050632911392405, auc: 0.8712097390054354\n",
      "train: 2018-10-25T19:53:33.760941: step 564, loss 0.15176936984062195, acc 0.8623639191290824, auc: 0.8812940683075148\n",
      "train: 2018-10-25T19:53:34.503631: step 565, loss 0.12158490717411041, acc 0.803446075303127, auc: 0.8835028961213549\n",
      "train: 2018-10-25T19:53:34.820897: step 566, loss 0.1686161607503891, acc 0.8856382978723404, auc: 0.8353437690888561\n",
      "train: 2018-10-25T19:53:35.259917: step 567, loss 0.16841048002243042, acc 0.8184143222506394, auc: 0.8671674819473562\n",
      "train: 2018-10-25T19:53:35.584575: step 568, loss 0.2184186726808548, acc 0.8543256997455471, auc: 0.9085679292190699\n",
      "train: 2018-10-25T19:53:35.869627: step 569, loss 0.13601280748844147, acc 0.911986588432523, auc: 0.9575724500790619\n",
      "train: 2018-10-25T19:53:36.188645: step 570, loss 0.12485158443450928, acc 0.9421487603305785, auc: 0.857995642701525\n",
      "train: 2018-10-25T19:53:36.658317: step 571, loss 0.17544054985046387, acc 0.8518762343647136, auc: 0.9069529404639332\n",
      "train: 2018-10-25T19:53:36.932309: step 572, loss 0.18052354454994202, acc 0.8807665010645848, auc: 0.9209184884991056\n",
      "train: 2018-10-25T19:53:37.269589: step 573, loss 0.26467180252075195, acc 0.8257491675915649, auc: 0.8286650156053531\n",
      "train: 2018-10-25T19:53:37.841075: step 574, loss 0.13352668285369873, acc 0.8918406072106262, auc: 0.943749213428034\n",
      "train: 2018-10-25T19:53:38.232089: step 575, loss 0.17689958214759827, acc 0.8402505873140172, auc: 0.8159027305045004\n",
      "train: 2018-10-25T19:53:38.910030: step 576, loss 0.14382006227970123, acc 0.8891566265060241, auc: 0.890685094623163\n",
      "train: 2018-10-25T19:53:39.415569: step 577, loss 0.16354329884052277, acc 0.8664383561643836, auc: 0.8606286959228135\n",
      "train: 2018-10-25T19:53:39.798354: step 578, loss 0.15912604331970215, acc 0.8091334894613583, auc: 0.8826577614002764\n",
      "train: 2018-10-25T19:53:40.320813: step 579, loss 0.15461857616901398, acc 0.842326139088729, auc: 0.8847461710380885\n",
      "train: 2018-10-25T19:53:40.644237: step 580, loss 0.21398581564426422, acc 0.813989239046887, auc: 0.8863376794016575\n",
      "train: 2018-10-25T19:53:40.926701: step 581, loss 0.1531641185283661, acc 0.8612945838837517, auc: 0.9039839698255541\n",
      "train: 2018-10-25T19:53:41.298181: step 582, loss 0.1617329865694046, acc 0.87823585810163, auc: 0.8139651755379389\n",
      "train: 2018-10-25T19:53:41.648780: step 583, loss 0.1864982396364212, acc 0.8508196721311475, auc: 0.8713575382306032\n",
      "train: 2018-10-25T19:53:42.051204: step 584, loss 0.1905289590358734, acc 0.8320135746606335, auc: 0.8543105626684636\n",
      "train: 2018-10-25T19:53:42.295427: step 585, loss 0.1959480196237564, acc 0.8775226165622826, auc: 0.8529042343320158\n",
      "train: 2018-10-25T19:53:42.554006: step 586, loss 0.23366835713386536, acc 0.8114754098360656, auc: 0.86253056661776\n",
      "train: 2018-10-25T19:53:42.837279: step 587, loss 0.21074096858501434, acc 0.8538516918646508, auc: 0.8781781376518218\n",
      "train: 2018-10-25T19:53:43.179542: step 588, loss 0.15611359477043152, acc 0.8634782608695653, auc: 0.8888461538461537\n",
      "train: 2018-10-25T19:53:43.495961: step 589, loss 0.21456317603588104, acc 0.835920177383592, auc: 0.8596159821040203\n",
      "train: 2018-10-25T19:53:43.720504: step 590, loss 0.24804668128490448, acc 0.848582995951417, auc: 0.8552872748600895\n",
      "train: 2018-10-25T19:53:44.279641: step 591, loss 0.21318526566028595, acc 0.8094534711964549, auc: 0.8690944102433718\n",
      "train: 2018-10-25T19:53:44.668201: step 592, loss 0.17582519352436066, acc 0.846696600384862, auc: 0.9217585020019468\n",
      "train: 2018-10-25T19:53:45.203217: step 593, loss 0.13885971903800964, acc 0.8654194327097163, auc: 0.8873833730806243\n",
      "train: 2018-10-25T19:53:45.647171: step 594, loss 0.19865961372852325, acc 0.8200573065902579, auc: 0.8844486664230911\n",
      "train: 2018-10-25T19:53:46.064126: step 595, loss 0.23025548458099365, acc 0.8087400681044268, auc: 0.8628996007297918\n",
      "train: 2018-10-25T19:53:46.533389: step 596, loss 0.21707910299301147, acc 0.8350877192982457, auc: 0.872888956577656\n",
      "train: 2018-10-25T19:53:46.872491: step 597, loss 0.19034908711910248, acc 0.747, auc: 0.8359539633652131\n",
      "train: 2018-10-25T19:53:47.308793: step 598, loss 0.18403814733028412, acc 0.8112724167378309, auc: 0.8456762484701948\n",
      "train: 2018-10-25T19:53:47.675227: step 599, loss 0.16671530902385712, acc 0.872, auc: 0.8765863765863766\n",
      "train: 2018-10-25T19:53:48.160755: step 600, loss 0.16144639253616333, acc 0.792156862745098, auc: 0.8655518500538237\n",
      "\n",
      "Evaluation:\n",
      "dev: 2018-10-25T19:53:49.732891, step: 600, loss: 0.2305428018936744, acc: 0.818682849493912, auc: 0.8144377039207383\n",
      "Saved model checkpoint to model/my-model-600\n",
      "\n",
      "train: 2018-10-25T19:53:50.430303: step 601, loss 0.13828539848327637, acc 0.8274456521739131, auc: 0.8771182382797629\n",
      "train: 2018-10-25T19:53:51.016596: step 602, loss 0.12424110621213913, acc 0.8639269406392694, auc: 0.8929039330499184\n",
      "train: 2018-10-25T19:53:51.366392: step 603, loss 0.23189914226531982, acc 0.8263665594855305, auc: 0.8829351395730706\n",
      "train: 2018-10-25T19:53:51.680613: step 604, loss 0.18498925864696503, acc 0.8519938650306749, auc: 0.8773432075908354\n",
      "train: 2018-10-25T19:53:51.979469: step 605, loss 0.18827061355113983, acc 0.8775235531628532, auc: 0.925423810907682\n",
      "train: 2018-10-25T19:53:52.348396: step 606, loss 0.17437335848808289, acc 0.8832518337408313, auc: 0.878424039373044\n",
      "train: 2018-10-25T19:53:52.661107: step 607, loss 0.14891880750656128, acc 0.8895397489539749, auc: 0.9444169474532498\n",
      "train: 2018-10-25T19:53:53.103917: step 608, loss 0.21007902920246124, acc 0.8503787878787878, auc: 0.9191178038514847\n",
      "train: 2018-10-25T19:53:53.397522: step 609, loss 0.16409040987491608, acc 0.8852589641434263, auc: 0.9309442504944138\n",
      "train: 2018-10-25T19:53:53.764460: step 610, loss 0.20328602194786072, acc 0.879372738238842, auc: 0.873032708032708\n",
      "train: 2018-10-25T19:53:54.166377: step 611, loss 0.16542722284793854, acc 0.8691330756488128, auc: 0.8923461248531808\n",
      "train: 2018-10-25T19:53:54.410837: step 612, loss 0.14048884809017181, acc 0.8855975485188968, auc: 0.8856949367453388\n",
      "train: 2018-10-25T19:53:54.677808: step 613, loss 0.15716323256492615, acc 0.8538538538538538, auc: 0.9207748347565977\n",
      "train: 2018-10-25T19:53:54.896545: step 614, loss 0.1686786711215973, acc 0.8888024883359253, auc: 0.8615173674588664\n",
      "train: 2018-10-25T19:53:55.225242: step 615, loss 0.19001717865467072, acc 0.8563419761737912, auc: 0.8747470744696819\n",
      "train: 2018-10-25T19:53:55.447435: step 616, loss 0.21882560849189758, acc 0.8238884045335658, auc: 0.8899881226856705\n",
      "train: 2018-10-25T19:53:55.945858: step 617, loss 0.14998076856136322, acc 0.8479166666666667, auc: 0.8701159951159951\n",
      "train: 2018-10-25T19:53:56.197823: step 618, loss 0.24411679804325104, acc 0.8426966292134831, auc: 0.8867677486098539\n",
      "train: 2018-10-25T19:53:56.902699: step 619, loss 0.1353800892829895, acc 0.8391484328799527, auc: 0.9196558505408062\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:53:57.199739: step 620, loss 0.1731693297624588, acc 0.911620294599018, auc: 0.9183915862006438\n",
      "train: 2018-10-25T19:53:57.501023: step 621, loss 0.18130500614643097, acc 0.8875614898102601, auc: 0.8745383057309662\n",
      "train: 2018-10-25T19:53:57.958288: step 622, loss 0.17037518322467804, acc 0.8635265700483091, auc: 0.8868365024594376\n",
      "train: 2018-10-25T19:53:58.508403: step 623, loss 0.1358027458190918, acc 0.8482084690553746, auc: 0.8819978814857613\n",
      "train: 2018-10-25T19:53:58.788139: step 624, loss 0.17110413312911987, acc 0.8545454545454545, auc: 0.9176759620034349\n",
      "train: 2018-10-25T19:53:59.182990: step 625, loss 0.12518854439258575, acc 0.8940092165898618, auc: 0.9204758254289713\n",
      "train: 2018-10-25T19:53:59.538632: step 626, loss 0.19260793924331665, acc 0.8539119804400978, auc: 0.8628052020239176\n",
      "train: 2018-10-25T19:53:59.972447: step 627, loss 0.1517626941204071, acc 0.8771498771498771, auc: 0.9399569892473119\n",
      "train: 2018-10-25T19:54:00.194583: step 628, loss 0.10984721034765244, acc 0.9291338582677166, auc: 0.9541334423771554\n",
      "train: 2018-10-25T19:54:00.660548: step 629, loss 0.1216897964477539, acc 0.8900302114803625, auc: 0.9144845757933385\n",
      "train: 2018-10-25T19:54:00.986387: step 630, loss 0.1824912577867508, acc 0.883934890304317, auc: 0.8774720876633681\n",
      "train: 2018-10-25T19:54:01.359615: step 631, loss 0.18203726410865784, acc 0.8293436293436294, auc: 0.8298423847150801\n",
      "train: 2018-10-25T19:54:01.720250: step 632, loss 0.1601819396018982, acc 0.8783140720598233, auc: 0.8789934990629003\n",
      "train: 2018-10-25T19:54:01.971108: step 633, loss 0.2878318130970001, acc 0.8233644859813084, auc: 0.8038253275993943\n",
      "train: 2018-10-25T19:54:02.315139: step 634, loss 0.15044616162776947, acc 0.9196428571428571, auc: 0.9225096526539658\n",
      "train: 2018-10-25T19:54:02.629922: step 635, loss 0.1997944712638855, acc 0.8402607986960066, auc: 0.8679467834958372\n",
      "train: 2018-10-25T19:54:02.999464: step 636, loss 0.1891641765832901, acc 0.8750902527075812, auc: 0.9077341770751514\n",
      "train: 2018-10-25T19:54:03.706837: step 637, loss 0.16071733832359314, acc 0.7755644090305445, auc: 0.8212164073550212\n",
      "train: 2018-10-25T19:54:04.019613: step 638, loss 0.19384190440177917, acc 0.8390501319261213, auc: 0.8894750534774817\n",
      "train: 2018-10-25T19:54:04.939299: step 639, loss 0.12169329822063446, acc 0.8596724214254094, auc: 0.931067327056006\n",
      "train: 2018-10-25T19:54:05.288983: step 640, loss 0.1634683907032013, acc 0.8522267206477733, auc: 0.9164367139649949\n",
      "train: 2018-10-25T19:54:05.842843: step 641, loss 0.18530216813087463, acc 0.8323662153449387, auc: 0.8611455399061033\n",
      "train: 2018-10-25T19:54:06.222190: step 642, loss 0.169279545545578, acc 0.8492550394390885, auc: 0.8948271026165004\n",
      "train: 2018-10-25T19:54:06.636635: step 643, loss 0.19276516139507294, acc 0.8196825396825397, auc: 0.8938981971389378\n",
      "train: 2018-10-25T19:54:07.115778: step 644, loss 0.19938528537750244, acc 0.7935691318327974, auc: 0.8421246414261434\n",
      "train: 2018-10-25T19:54:07.610637: step 645, loss 0.17914259433746338, acc 0.8143360752056404, auc: 0.8863213754739395\n",
      "train: 2018-10-25T19:54:08.144803: step 646, loss 0.1643277108669281, acc 0.8675115207373272, auc: 0.9092314330017971\n",
      "train: 2018-10-25T19:54:08.455962: step 647, loss 0.1663544923067093, acc 0.8045801526717558, auc: 0.8537015396600857\n",
      "train: 2018-10-25T19:54:08.666664: step 648, loss 0.23541030287742615, acc 0.8416075650118203, auc: 0.8746363938765545\n",
      "train: 2018-10-25T19:54:09.008944: step 649, loss 0.18049028515815735, acc 0.8426171529619806, auc: 0.9019002239753484\n",
      "train: 2018-10-25T19:54:09.703991: step 650, loss 0.14535203576087952, acc 0.8257527226137091, auc: 0.8772256567271876\n",
      "train: 2018-10-25T19:54:10.057296: step 651, loss 0.10911140590906143, acc 0.9406276505513147, auc: 0.9590032436223371\n",
      "train: 2018-10-25T19:54:10.433673: step 652, loss 0.1752997636795044, acc 0.8762214983713354, auc: 0.8491700911407516\n",
      "train: 2018-10-25T19:54:10.837425: step 653, loss 0.16826245188713074, acc 0.8808058198097369, auc: 0.8789763434915808\n",
      "train: 2018-10-25T19:54:11.108816: step 654, loss 0.23434574902057648, acc 0.8548790658882403, auc: 0.8836343587208639\n",
      "train: 2018-10-25T19:54:11.520836: step 655, loss 0.12966322898864746, acc 0.8810606060606061, auc: 0.9292035398230087\n",
      "train: 2018-10-25T19:54:11.816561: step 656, loss 0.1615952104330063, acc 0.8840245775729647, auc: 0.8848997218562437\n",
      "train: 2018-10-25T19:54:12.138185: step 657, loss 0.1655278354883194, acc 0.901320361362057, auc: 0.9183345035635062\n",
      "train: 2018-10-25T19:54:12.431155: step 658, loss 0.15978682041168213, acc 0.8788410886742757, auc: 0.9048600393682596\n",
      "train: 2018-10-25T19:54:12.759357: step 659, loss 0.1592557430267334, acc 0.8597560975609756, auc: 0.9214459732244368\n",
      "train: 2018-10-25T19:54:13.130598: step 660, loss 0.17298738658428192, acc 0.8539325842696629, auc: 0.8772365141534628\n",
      "train: 2018-10-25T19:54:13.421053: step 661, loss 0.15278221666812897, acc 0.8910256410256411, auc: 0.9153827341648324\n",
      "train: 2018-10-25T19:54:13.765380: step 662, loss 0.18099211156368256, acc 0.8341836734693877, auc: 0.8846581367589772\n",
      "train: 2018-10-25T19:54:14.208268: step 663, loss 0.1675797998905182, acc 0.867515923566879, auc: 0.9181335078116273\n",
      "train: 2018-10-25T19:54:14.656706: step 664, loss 0.17777854204177856, acc 0.8145604395604396, auc: 0.8315617007672634\n",
      "train: 2018-10-25T19:54:15.015865: step 665, loss 0.11564410477876663, acc 0.8669467787114846, auc: 0.9180815647107783\n",
      "train: 2018-10-25T19:54:15.426808: step 666, loss 0.17619214951992035, acc 0.8462109955423477, auc: 0.9023444571842375\n",
      "train: 2018-10-25T19:54:16.005485: step 667, loss 0.12266553193330765, acc 0.8988066825775657, auc: 0.9281533879374533\n",
      "train: 2018-10-25T19:54:16.482105: step 668, loss 0.17159384489059448, acc 0.8597883597883598, auc: 0.8908484143120602\n",
      "train: 2018-10-25T19:54:16.886566: step 669, loss 0.16245734691619873, acc 0.8654217643271088, auc: 0.8617363344051447\n",
      "train: 2018-10-25T19:54:17.304161: step 670, loss 0.17225579917430878, acc 0.8690148596587782, auc: 0.8624579922539212\n",
      "train: 2018-10-25T19:54:17.634162: step 671, loss 0.14630569517612457, acc 0.8947368421052632, auc: 0.9041345015391902\n",
      "train: 2018-10-25T19:54:18.077967: step 672, loss 0.22093921899795532, acc 0.8399002493765586, auc: 0.9038993371569151\n",
      "train: 2018-10-25T19:54:18.397576: step 673, loss 0.17642249166965485, acc 0.8791858174655286, auc: 0.921702026288501\n",
      "train: 2018-10-25T19:54:18.896168: step 674, loss 0.12237012386322021, acc 0.8697378872120731, auc: 0.9124829858165822\n",
      "train: 2018-10-25T19:54:19.246797: step 675, loss 0.20851539075374603, acc 0.827212020033389, auc: 0.9039917106060041\n",
      "train: 2018-10-25T19:54:19.597711: step 676, loss 0.16835609078407288, acc 0.8393213572854291, auc: 0.8990016152867994\n",
      "train: 2018-10-25T19:54:20.497459: step 677, loss 0.1060769185423851, acc 0.8190416141235813, auc: 0.9028500877875141\n",
      "train: 2018-10-25T19:54:20.952503: step 678, loss 0.14880459010601044, acc 0.8532448377581121, auc: 0.9039060358532975\n",
      "train: 2018-10-25T19:54:21.306360: step 679, loss 0.12933094799518585, acc 0.8767123287671232, auc: 0.9250404074288677\n",
      "train: 2018-10-25T19:54:21.627312: step 680, loss 0.1653047800064087, acc 0.8981026001405481, auc: 0.8817867382696947\n",
      "train: 2018-10-25T19:54:22.081557: step 681, loss 0.2126379758119583, acc 0.8302165354330708, auc: 0.8880700905870511\n",
      "train: 2018-10-25T19:54:22.483758: step 682, loss 0.15209664404392242, acc 0.8240270727580372, auc: 0.8921338155515369\n",
      "train: 2018-10-25T19:54:22.815119: step 683, loss 0.15552276372909546, acc 0.895330112721417, auc: 0.8654912161009721\n",
      "train: 2018-10-25T19:54:23.143982: step 684, loss 0.1422078162431717, acc 0.8824130879345603, auc: 0.9025796224239804\n",
      "train: 2018-10-25T19:54:23.492744: step 685, loss 0.18410231173038483, acc 0.8626543209876543, auc: 0.9048628302569671\n",
      "train: 2018-10-25T19:54:23.886076: step 686, loss 0.19501841068267822, acc 0.8267762771579565, auc: 0.8881373144465561\n",
      "train: 2018-10-25T19:54:24.202647: step 687, loss 0.15346916019916534, acc 0.872444011684518, auc: 0.9129728987454758\n",
      "train: 2018-10-25T19:54:24.692826: step 688, loss 0.15646196901798248, acc 0.8445523941707148, auc: 0.8772311479395437\n",
      "train: 2018-10-25T19:54:25.150061: step 689, loss 0.15257468819618225, acc 0.8649068322981367, auc: 0.8934097835224885\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:54:25.487807: step 690, loss 0.2039531022310257, acc 0.8913857677902621, auc: 0.9396717092642931\n",
      "train: 2018-10-25T19:54:26.196847: step 691, loss 0.11873752623796463, acc 0.8509705698184096, auc: 0.9243834107079272\n",
      "train: 2018-10-25T19:54:26.433168: step 692, loss 0.17917697131633759, acc 0.8704974271012007, auc: 0.8837047912454671\n",
      "train: 2018-10-25T19:54:26.773420: step 693, loss 0.17377030849456787, acc 0.8628691983122363, auc: 0.8913764813126709\n",
      "train: 2018-10-25T19:54:27.167873: step 694, loss 0.15853343904018402, acc 0.8768115942028986, auc: 0.9138911252425016\n",
      "train: 2018-10-25T19:54:27.562693: step 695, loss 0.2330554872751236, acc 0.8029336734693877, auc: 0.8560185052974271\n",
      "train: 2018-10-25T19:54:28.066648: step 696, loss 0.13150537014007568, acc 0.8372930165586753, auc: 0.8990731582225154\n",
      "train: 2018-10-25T19:54:28.623703: step 697, loss 0.17580658197402954, acc 0.8180048661800486, auc: 0.8865204434474885\n",
      "train: 2018-10-25T19:54:29.168945: step 698, loss 0.13114319741725922, acc 0.8457337883959044, auc: 0.8783751440908114\n",
      "train: 2018-10-25T19:54:29.495666: step 699, loss 0.21492858231067657, acc 0.7803951367781155, auc: 0.8410263640208657\n",
      "train: 2018-10-25T19:54:30.029329: step 700, loss 0.17630484700202942, acc 0.8128544423440454, auc: 0.872169664605911\n",
      "\n",
      "Evaluation:\n",
      "dev: 2018-10-25T19:54:31.781393, step: 700, loss: 0.22648925735400274, acc: 0.8171938331935026, auc: 0.8136969775418905\n",
      "Saved model checkpoint to model/my-model-700\n",
      "\n",
      "train: 2018-10-25T19:54:32.439960: step 701, loss 0.16810180246829987, acc 0.8297232250300842, auc: 0.8643098956361048\n",
      "train: 2018-10-25T19:54:32.929163: step 702, loss 0.13277074694633484, acc 0.8471940500338067, auc: 0.9291256396519555\n",
      "train: 2018-10-25T19:54:33.182391: step 703, loss 0.13374245166778564, acc 0.8910386965376782, auc: 0.8890918746680828\n",
      "train: 2018-10-25T19:54:33.586720: step 704, loss 0.1509140133857727, acc 0.8514246947082768, auc: 0.8832283394920983\n",
      "train: 2018-10-25T19:54:33.902951: step 705, loss 0.20018303394317627, acc 0.7996340347666971, auc: 0.8795090560717256\n",
      "train: 2018-10-25T19:54:34.362324: step 706, loss 0.15698112547397614, acc 0.8461007591442374, auc: 0.8944536365966769\n",
      "train: 2018-10-25T19:54:34.870784: step 707, loss 0.1724139004945755, acc 0.8282051282051283, auc: 0.8810135543838289\n",
      "train: 2018-10-25T19:54:35.280512: step 708, loss 0.13296213746070862, acc 0.852165725047081, auc: 0.8804709782050565\n",
      "train: 2018-10-25T19:54:35.700778: step 709, loss 0.16395731270313263, acc 0.8476775956284153, auc: 0.8993250096177386\n",
      "train: 2018-10-25T19:54:36.178845: step 710, loss 0.15029111504554749, acc 0.8729216152019003, auc: 0.9012490829223896\n",
      "train: 2018-10-25T19:54:36.695697: step 711, loss 0.13004064559936523, acc 0.8910323253388946, auc: 0.9509609541832323\n",
      "train: 2018-10-25T19:54:37.090368: step 712, loss 0.17504678666591644, acc 0.8285953177257525, auc: 0.8912753555304007\n",
      "train: 2018-10-25T19:54:37.381300: step 713, loss 0.20998795330524445, acc 0.8402107111501317, auc: 0.8993087172949675\n",
      "train: 2018-10-25T19:54:37.726187: step 714, loss 0.12947170436382294, acc 0.9021459227467811, auc: 0.9076635709340752\n",
      "train: 2018-10-25T19:54:38.092304: step 715, loss 0.1399330496788025, acc 0.8880208333333334, auc: 0.9123486467236468\n",
      "train: 2018-10-25T19:54:38.547820: step 716, loss 0.1571664810180664, acc 0.8818031885651457, auc: 0.9270051396496923\n",
      "train: 2018-10-25T19:54:38.931139: step 717, loss 0.17397604882717133, acc 0.8361111111111111, auc: 0.8773137973137973\n",
      "train: 2018-10-25T19:54:39.294952: step 718, loss 0.224358931183815, acc 0.8665893271461717, auc: 0.8999948196767862\n",
      "train: 2018-10-25T19:54:39.599319: step 719, loss 0.16589024662971497, acc 0.8786217697729053, auc: 0.9264323950190816\n",
      "train: 2018-10-25T19:54:40.155589: step 720, loss 0.1434011161327362, acc 0.8772311710927296, auc: 0.8947883060749118\n",
      "train: 2018-10-25T19:54:40.543632: step 721, loss 0.11633773893117905, acc 0.840956340956341, auc: 0.8734233249841702\n",
      "train: 2018-10-25T19:54:40.918326: step 722, loss 0.2353161722421646, acc 0.857889237199582, auc: 0.9117576243980738\n",
      "train: 2018-10-25T19:54:41.239092: step 723, loss 0.16156896948814392, acc 0.8654708520179372, auc: 0.9178204084094954\n",
      "train: 2018-10-25T19:54:41.655677: step 724, loss 0.1686471700668335, acc 0.8067010309278351, auc: 0.8727855813392965\n",
      "train: 2018-10-25T19:54:41.972939: step 725, loss 0.14173857867717743, acc 0.9120622568093385, auc: 0.8713460779742477\n",
      "train: 2018-10-25T19:54:42.821636: step 726, loss 0.12174094468355179, acc 0.8036093418259024, auc: 0.8914284551043397\n",
      "train: 2018-10-25T19:54:43.127099: step 727, loss 0.13674131035804749, acc 0.8468677494199536, auc: 0.9243820122229317\n",
      "train: 2018-10-25T19:54:43.621304: step 728, loss 0.14082849025726318, acc 0.8740105540897097, auc: 0.9287345849738067\n",
      "train: 2018-10-25T19:54:44.042765: step 729, loss 0.19467860460281372, acc 0.8592896174863388, auc: 0.9032889520864955\n",
      "train: 2018-10-25T19:54:44.493487: step 730, loss 0.14894947409629822, acc 0.8998881431767338, auc: 0.9141336164964213\n",
      "train: 2018-10-25T19:54:45.105144: step 731, loss 0.10118784010410309, acc 0.8685800604229608, auc: 0.930281365586188\n",
      "train: 2018-10-25T19:54:45.394898: step 732, loss 0.13200044631958008, acc 0.9020522388059702, auc: 0.8913421775234063\n",
      "train: 2018-10-25T19:54:45.689091: step 733, loss 0.17009367048740387, acc 0.871313672922252, auc: 0.938747469460331\n",
      "train: 2018-10-25T19:54:46.048807: step 734, loss 0.17672525346279144, acc 0.8854688618468146, auc: 0.8645486894434495\n",
      "train: 2018-10-25T19:54:46.394881: step 735, loss 0.11498695611953735, acc 0.9227674979887369, auc: 0.8854162433109801\n",
      "train: 2018-10-25T19:54:46.632994: step 736, loss 0.23998674750328064, acc 0.8303212851405622, auc: 0.8446589481002165\n",
      "train: 2018-10-25T19:54:46.923690: step 737, loss 0.18737301230430603, acc 0.8302055406613047, auc: 0.9013852704242432\n",
      "train: 2018-10-25T19:54:47.479694: step 738, loss 0.11733675003051758, acc 0.8572479764532744, auc: 0.8991616311965854\n",
      "train: 2018-10-25T19:54:48.051382: step 739, loss 0.1448279321193695, acc 0.8512709572742023, auc: 0.8851323665930408\n",
      "train: 2018-10-25T19:54:48.381372: step 740, loss 0.17323504388332367, acc 0.8609539207760711, auc: 0.918339403413658\n",
      "train: 2018-10-25T19:54:48.740117: step 741, loss 0.19379450380802155, acc 0.8473648186173853, auc: 0.8924503982407548\n",
      "train: 2018-10-25T19:54:49.179157: step 742, loss 0.15423299372196198, acc 0.8691860465116279, auc: 0.9320136566893606\n",
      "train: 2018-10-25T19:54:49.727320: step 743, loss 0.2729017436504364, acc 0.8272113943028486, auc: 0.8872823787074502\n",
      "train: 2018-10-25T19:54:50.153290: step 744, loss 0.11604207009077072, acc 0.8504486540378864, auc: 0.9275412363683087\n",
      "train: 2018-10-25T19:54:50.478785: step 745, loss 0.11784467846155167, acc 0.8925233644859814, auc: 0.8936492673992675\n",
      "train: 2018-10-25T19:54:50.832860: step 746, loss 0.17908495664596558, acc 0.8535598705501618, auc: 0.886813163541902\n",
      "train: 2018-10-25T19:54:51.174338: step 747, loss 0.17389458417892456, acc 0.8512720156555773, auc: 0.917821275804352\n",
      "train: 2018-10-25T19:54:51.609133: step 748, loss 0.20056159794330597, acc 0.8411697247706422, auc: 0.879845490580894\n",
      "train: 2018-10-25T19:54:52.139561: step 749, loss 0.17366589605808258, acc 0.8229166666666666, auc: 0.8991062969556166\n",
      "train: 2018-10-25T19:54:52.475542: step 750, loss 0.17267990112304688, acc 0.8990885416666666, auc: 0.8765567075996293\n",
      "train: 2018-10-25T19:54:52.896332: step 751, loss 0.19340699911117554, acc 0.8483685220729367, auc: 0.9172192543713876\n",
      "train: 2018-10-25T19:54:53.324483: step 752, loss 0.19848619401454926, acc 0.8370221327967807, auc: 0.9030705460794357\n",
      "train: 2018-10-25T19:54:53.634737: step 753, loss 0.15491162240505219, acc 0.9133697135061392, auc: 0.8621872103799815\n",
      "train: 2018-10-25T19:54:54.131916: step 754, loss 0.1104145422577858, acc 0.8278965129358831, auc: 0.8856867931019653\n",
      "train: 2018-10-25T19:54:54.725579: step 755, loss 0.112158864736557, acc 0.8490427098674521, auc: 0.9004149152836157\n",
      "train: 2018-10-25T19:54:55.135205: step 756, loss 0.15814615786075592, acc 0.8866498740554156, auc: 0.9031744567580745\n",
      "train: 2018-10-25T19:54:55.611093: step 757, loss 0.14201931655406952, acc 0.8996763754045307, auc: 0.9049355253006683\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train: 2018-10-25T19:54:55.982648: step 758, loss 0.13239185512065887, acc 0.9162264150943397, auc: 0.9266345334120271\n",
      "train: 2018-10-25T19:54:56.312374: step 759, loss 0.15930967032909393, acc 0.8813920454545454, auc: 0.9374420316877504\n",
      "train: 2018-10-25T19:54:56.796604: step 760, loss 0.11861255019903183, acc 0.8808888888888889, auc: 0.9157937147461724\n",
      "train: 2018-10-25T19:54:57.113950: step 761, loss 0.14390060305595398, acc 0.9097496706192358, auc: 0.9222820105173046\n",
      "train: 2018-10-25T19:54:57.504950: step 762, loss 0.1822786182165146, acc 0.810905892700088, auc: 0.890052019780361\n",
      "train: 2018-10-25T19:54:58.068728: step 763, loss 0.1429971605539322, acc 0.8249084249084249, auc: 0.8804738793872092\n",
      "train: 2018-10-25T19:54:58.513529: step 764, loss 0.15866243839263916, acc 0.8579846788450206, auc: 0.9047275110015711\n",
      "train: 2018-10-25T19:54:59.039733: step 765, loss 0.15494102239608765, acc 0.8337547408343868, auc: 0.9071478074596773\n",
      "train: 2018-10-25T19:54:59.351942: step 766, loss 0.1814350038766861, acc 0.845360824742268, auc: 0.9205488139311668\n",
      "train: 2018-10-25T19:54:59.989269: step 767, loss 0.1321433037519455, acc 0.8726079825041007, auc: 0.9097385233405125\n",
      "train: 2018-10-25T19:55:00.277215: step 768, loss 0.13998134434223175, acc 0.910828025477707, auc: 0.934122880177926\n",
      "train: 2018-10-25T19:55:00.870738: step 769, loss 0.11823733150959015, acc 0.8924611973392461, auc: 0.9600214475062262\n",
      "train: 2018-10-25T19:55:01.283022: step 770, loss 0.26070114970207214, acc 0.8241118229470006, auc: 0.8926371224165341\n",
      "train: 2018-10-25T19:55:01.900212: step 771, loss 0.1145063117146492, acc 0.8454363757495004, auc: 0.9223568497166308\n",
      "train: 2018-10-25T19:55:02.286536: step 772, loss 0.1416444331407547, acc 0.83690587138863, auc: 0.8769693679019049\n",
      "train: 2018-10-25T19:55:02.623519: step 773, loss 0.1093793511390686, acc 0.8927038626609443, auc: 0.9170194003527338\n",
      "train: 2018-10-25T19:55:02.976821: step 774, loss 0.14156658947467804, acc 0.8599221789883269, auc: 0.9298163039079408\n",
      "train: 2018-10-25T19:55:03.316188: step 775, loss 0.14544816315174103, acc 0.9247027741083224, auc: 0.9449795031362671\n",
      "train: 2018-10-25T19:55:03.838954: step 776, loss 0.13516992330551147, acc 0.8507281553398058, auc: 0.9110612097881634\n",
      "train: 2018-10-25T19:55:04.342809: step 777, loss 0.12151330709457397, acc 0.8979591836734694, auc: 0.9297598645932508\n",
      "train: 2018-10-25T19:55:04.592058: step 778, loss 0.16484147310256958, acc 0.8621523579201935, auc: 0.8921725993768294\n",
      "train: 2018-10-25T19:55:04.953771: step 779, loss 0.1396644562482834, acc 0.918954248366013, auc: 0.8944832944832946\n",
      "train: 2018-10-25T19:55:05.351643: step 780, loss 0.17214398086071014, acc 0.8490208464939988, auc: 0.9162623660231315\n",
      "train: 2018-10-25T19:55:05.687361: step 781, loss 0.14686138927936554, acc 0.9008498583569405, auc: 0.9501448238870016\n",
      "train: 2018-10-25T19:55:06.165900: step 782, loss 0.13509899377822876, acc 0.8641265276779295, auc: 0.8745546238548536\n",
      "train: 2018-10-25T19:55:06.463767: step 783, loss 0.13135674595832825, acc 0.8846918489065606, auc: 0.8650341728372832\n",
      "train: 2018-10-25T19:55:06.735710: step 784, loss 0.20368348062038422, acc 0.861003861003861, auc: 0.8633927639719146\n",
      "train: 2018-10-25T19:55:07.060277: step 785, loss 0.1374935507774353, acc 0.8734299516908213, auc: 0.8866963598918141\n",
      "train: 2018-10-25T19:55:07.346824: step 786, loss 0.1768869161605835, acc 0.8737517831669044, auc: 0.9302530871503532\n",
      "train: 2018-10-25T19:55:07.734973: step 787, loss 0.204156756401062, acc 0.8604260089686099, auc: 0.9223130866516239\n",
      "train: 2018-10-25T19:55:08.088433: step 788, loss 0.16260220110416412, acc 0.8575174825174825, auc: 0.9065687522133881\n",
      "train: 2018-10-25T19:55:09.059867: step 789, loss 0.13371285796165466, acc 0.8253833049403748, auc: 0.8872397730779876\n",
      "train: 2018-10-25T19:55:09.365116: step 790, loss 0.1607494354248047, acc 0.887719298245614, auc: 0.9113023952095809\n",
      "train: 2018-10-25T19:55:09.585832: step 791, loss 0.1932719498872757, acc 0.8371824480369515, auc: 0.9073305748154775\n",
      "train: 2018-10-25T19:55:10.107879: step 792, loss 0.1409904807806015, acc 0.8590694538098449, auc: 0.9280771347770798\n",
      "train: 2018-10-25T19:55:10.535170: step 793, loss 0.13702480494976044, acc 0.8386850152905199, auc: 0.8855334051724137\n",
      "train: 2018-10-25T19:55:10.858213: step 794, loss 0.16430751979351044, acc 0.8772619984264359, auc: 0.8955133650145014\n",
      "train: 2018-10-25T19:55:11.241275: step 795, loss 0.18802903592586517, acc 0.8618867924528302, auc: 0.8957202548281206\n",
      "train: 2018-10-25T19:55:11.647412: step 796, loss 0.1647878736257553, acc 0.8812067881835324, auc: 0.9120494312722071\n",
      "train: 2018-10-25T19:55:11.956072: step 797, loss 0.18877176940441132, acc 0.85790273556231, auc: 0.8662846330780425\n",
      "train: 2018-10-25T19:55:12.624384: step 798, loss 0.13592667877674103, acc 0.8617125984251969, auc: 0.9156869968180752\n",
      "train: 2018-10-25T19:55:13.081673: step 799, loss 0.17562828958034515, acc 0.8219927095990279, auc: 0.8873642658906583\n",
      "train: 2018-10-25T19:55:13.299645: step 800, loss 0.2008550465106964, acc 0.8807174887892377, auc: 0.8774440901870708\n",
      "\n",
      "Evaluation:\n",
      "dev: 2018-10-25T19:55:15.168908, step: 800, loss: 0.22469853437863863, acc: 0.815617302576507, auc: 0.8144677839180899\n",
      "Saved model checkpoint to model/my-model-800\n",
      "\n",
      "train: 2018-10-25T19:55:15.657301: step 801, loss 0.17785201966762543, acc 0.8713025428126622, auc: 0.8923966009488148\n",
      "train: 2018-10-25T19:55:15.951127: step 802, loss 0.15464939177036285, acc 0.9015302727877578, auc: 0.9454829204409037\n",
      "train: 2018-10-25T19:55:16.666315: step 803, loss 0.1488107442855835, acc 0.8267015706806282, auc: 0.8607723281052269\n",
      "train: 2018-10-25T19:55:17.174474: step 804, loss 0.17982912063598633, acc 0.8266579120157583, auc: 0.8813936113626306\n",
      "train: 2018-10-25T19:55:17.499384: step 805, loss 0.16968853771686554, acc 0.8359154929577465, auc: 0.9125948269485245\n",
      "train: 2018-10-25T19:55:17.895644: step 806, loss 0.14830297231674194, acc 0.8428571428571429, auc: 0.8736382488789539\n",
      "train: 2018-10-25T19:55:18.330044: step 807, loss 0.1625484824180603, acc 0.8314917127071824, auc: 0.9045533029311655\n",
      "train: 2018-10-25T19:55:18.667694: step 808, loss 0.1832890659570694, acc 0.8466257668711656, auc: 0.8790198492075705\n",
      "train: 2018-10-25T19:55:18.909136: step 809, loss 0.1568915992975235, acc 0.8208469055374593, auc: 0.8377943394037815\n",
      "train: 2018-10-25T19:55:19.271561: step 810, loss 0.2213444858789444, acc 0.832089552238806, auc: 0.886783109865511\n"
     ]
    }
   ],
   "source": [
    "# Train the model\n",
    "def mean(item):\n",
    "    \"\"\"Return the arithmetic mean of a non-empty sequence of numbers.\"\"\"\n",
    "    total = sum(item)\n",
    "    count = len(item)\n",
    "    return total / count\n",
    "\n",
    "\n",
    "def gen_metrics(sequence_len, binary_pred, pred, target_correctness):\n",
    "    \"\"\"\n",
    "    Compute AUC and accuracy over the valid (non-padded) part of a batch.\n",
    "    :param sequence_len: list of true lengths, one per sequence in the batch\n",
    "    :param binary_pred: thresholded predictions, indexed [sequence, step]\n",
    "    :param pred: raw prediction scores, indexed [sequence, step]\n",
    "    :param target_correctness: ground-truth labels, indexed [sequence, step]\n",
    "    :return: tuple (auc, accuracy)\n",
    "    \"\"\"\n",
    "    # Trim every row to its true length, then flatten the whole batch.\n",
    "    flat_binary = np.concatenate(\n",
    "        [binary_pred[i, :n] for i, n in enumerate(sequence_len)])\n",
    "    flat_pred = np.concatenate(\n",
    "        [pred[i, :n] for i, n in enumerate(sequence_len)])\n",
    "    flat_target = np.concatenate(\n",
    "        [target_correctness[i, :n] for i, n in enumerate(sequence_len)])\n",
    "\n",
    "    # AUC uses the raw scores; accuracy uses the thresholded predictions.\n",
    "    auc = roc_auc_score(flat_target, flat_pred)\n",
    "    accuracy = accuracy_score(flat_target, flat_binary)\n",
    "\n",
    "    return auc, accuracy\n",
    "\n",
    "\n",
    "class DKTEngine(object):\n",
    "\n",
    "    def __init__(self):\n",
    "        \"\"\"Create an engine with configuration and empty model/session slots.\"\"\"\n",
    "        self.config = Config()\n",
    "        # Model used for training steps; attached outside __init__.\n",
    "        self.train_dkt = None\n",
    "        # Model used for evaluation steps; attached outside __init__.\n",
    "        self.test_dkt = None\n",
    "        # TensorFlow session; attached outside __init__.\n",
    "        self.sess = None\n",
    "        # Starts as a plain int; presumably rebound to a tf global-step\n",
    "        # variable before train_step/dev_step run -- confirm in run_epoch.\n",
    "        self.global_step = 0\n",
    "\n",
    "    def add_gradient_noise(self, grad, stddev=1e-3, name=None):\n",
    "        \"\"\"\n",
    "        Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].\n",
    "        :param grad: gradient tensor (or value convertible to one) to perturb\n",
    "        :param stddev: standard deviation of the zero-mean Gaussian noise\n",
    "        :param name: optional name for the resulting op\n",
    "        :return: tensor equal to grad plus noise of the same shape\n",
    "        \"\"\"\n",
    "        # tf.op_scope(values, name, default_name) was removed in TF 1.0;\n",
    "        # tf.name_scope(name, default_name, values) is the documented\n",
    "        # replacement (same semantics, reordered arguments).\n",
    "        with tf.name_scope(name, \"add_gradient_noise\", [grad, stddev]) as name:\n",
    "            grad = tf.convert_to_tensor(grad, name=\"grad\")\n",
    "            gn = tf.random_normal(tf.shape(grad), stddev=stddev)\n",
    "            return tf.add(grad, gn, name=name)\n",
    "\n",
    "    def train_step(self, params, train_op, train_summary_op, train_summary_writer):\n",
    "        \"\"\"\n",
    "        Run a single training step and print loss/accuracy/AUC for the batch.\n",
    "        :param params: dict with keys 'input_x', 'target_id',\n",
    "            'target_correctness', 'max_len', 'seq_len' describing one batch\n",
    "        :param train_op: optimizer update op to execute\n",
    "        :param train_summary_op: merged summary op for training metrics\n",
    "        :param train_summary_writer: writer that receives the summaries\n",
    "        \"\"\"\n",
    "        dkt = self.train_dkt\n",
    "        sess = self.sess\n",
    "        # NOTE(review): __init__ sets self.global_step = 0; it is presumably\n",
    "        # rebound to a tf.Variable before this runs -- confirm in run_epoch.\n",
    "        global_step = self.global_step\n",
    "\n",
    "        # Dropout is active during training (keep_prob from model config).\n",
    "        feed_dict = {dkt.input_data: params['input_x'],\n",
    "                     dkt.target_id: params['target_id'],\n",
    "                     dkt.target_correctness: params['target_correctness'],\n",
    "                     dkt.max_steps: params['max_len'],\n",
    "                     dkt.sequence_len: params['seq_len'],\n",
    "                     dkt.keep_prob: self.config.modelConfig.dropout_keep_prob}\n",
    "\n",
    "        _, step, summaries, loss, binary_pred, pred, target_correctness = sess.run(\n",
    "            [train_op, global_step, train_summary_op, dkt.loss, dkt.binary_pred, dkt.pred, dkt.target_correctness],\n",
    "            feed_dict)\n",
    "\n",
    "        # Metrics are computed only over each sequence's valid prefix.\n",
    "        auc, accuracy = gen_metrics(params['seq_len'], binary_pred, pred, target_correctness)\n",
    "\n",
    "        time_str = datetime.datetime.now().isoformat()\n",
    "        print(\"train: {}: step {}, loss {}, acc {}, auc: {}\".format(time_str, step, loss, accuracy, auc))\n",
    "        train_summary_writer.add_summary(summaries, step)\n",
    "\n",
    "    def dev_step(self, params, dev_summary_op, writer=None):\n",
    "        \"\"\"\n",
    "        Evaluates model on a dev set\n",
    "        \"\"\"\n",
    "        dkt = self.test_dkt\n",
    "        sess = self.sess\n",
    "        global_step = self.global_step\n",
    "\n",
    "        feed_dict = {dkt.input_data: params['input_x'],\n",
    "                     dkt.target_id: params['target_id'],\n",
    "                     dkt.target_correctness: params['target_correctness'],\n",
    "                     dkt.max_steps: params['max_len'],\n",
    "                     dkt.sequence_len: params['seq_len'],\n",
    "                     dkt.keep_prob: 1.0}\n",
    "        step, summaries, loss, pred, binary_pred, target_correctness = sess.run(\n",
    "            [global_step, dev_summary_op, dkt.loss, dkt.pred, dkt.binary_pred, dkt.target_correctness],\n",
    "            feed_dict)\n",
    "\n",
    "        auc, accuracy = gen_metrics(params['seq_len'], binary_pred, pred, target_correctness)\n",
    "        # precision, recall, f_score = precision_recall_fscore_support(target_correctness, binary_pred)\n",
    "\n",
    "        if writer:\n",
    "            writer.add_summary(summaries, step)\n",
    "\n",
    "        return loss, accuracy, auc\n",
    "\n",
    "    def run_epoch(self, fileName):\n",
    "        \"\"\"\n",
    "        训练模型\n",
    "        :param filePath:\n",
    "        :return:\n",
    "        \"\"\"\n",
    "\n",
    "        # 实例化配置参数对象\n",
    "        config = Config()\n",
    "\n",
    "        # 实例化数据生成对象\n",
    "        dataGen = DataGenerator(fileName, config)\n",
    "        dataGen.gen_attr()  # 生成训练集和测试集\n",
    "\n",
    "        train_seqs = dataGen.train_seqs\n",
    "        test_seqs = dataGen.test_seqs\n",
    "\n",
    "        session_conf = tf.ConfigProto(\n",
    "            allow_soft_placement=True,\n",
    "            log_device_placement=False\n",
    "        )\n",
    "        sess = tf.Session(config=session_conf)\n",
    "        self.sess = sess\n",
    "\n",
    "        with sess.as_default():\n",
    "            # 实例化dkt模型对象\n",
    "            with tf.name_scope(\"train\"):\n",
    "                with tf.variable_scope(\"dkt\", reuse=None):\n",
    "                    train_dkt = TensorFlowDKT(config)\n",
    "\n",
    "            with tf.name_scope(\"test\"):\n",
    "                with tf.variable_scope(\"dkt\", reuse=True):\n",
    "                    test_dkt = TensorFlowDKT(config)\n",
    "\n",
    "            self.train_dkt = train_dkt\n",
    "            self.test_dkt = test_dkt\n",
    "\n",
    "            global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n",
    "            self.global_step = global_step\n",
    "\n",
    "            # 定义一个优化器\n",
    "            optimizer = tf.train.AdamOptimizer(config.trainConfig.learning_rate)\n",
    "            grads_and_vars = optimizer.compute_gradients(train_dkt.loss)\n",
    "\n",
    "            # 对梯度进行截断，并且加上梯度噪音\n",
    "            grads_and_vars = [(tf.clip_by_norm(g, config.trainConfig.max_grad_norm), v)\n",
    "                              for g, v in grads_and_vars if g is not None]\n",
    "            # grads_and_vars = [(self.add_gradient_noise(g), v) for g, v in grads_and_vars]\n",
    "\n",
    "            # 定义图中最后的节点\n",
    "            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step, name=\"train_op\")\n",
    "\n",
    "            # 保存各种变量或结果的值\n",
    "            grad_summaries = []\n",
    "            for g, v in grads_and_vars:\n",
    "                if g is not None:\n",
    "                    grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name), g)\n",
    "                    sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n",
    "                    grad_summaries.append(grad_hist_summary)\n",
    "                    grad_summaries.append(sparsity_summary)\n",
    "            grad_summaries_merged = tf.summary.merge(grad_summaries)\n",
    "\n",
    "            timestamp = str(int(time.time()))\n",
    "            out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n",
    "            print(\"writing to {}\".format(out_dir))\n",
    "\n",
    "            # 训练时的 Summaries\n",
    "            train_loss_summary = tf.summary.scalar(\"loss\", train_dkt.loss)\n",
    "            train_summary_op = tf.summary.merge([train_loss_summary, grad_summaries_merged])\n",
    "            train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n",
    "            train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n",
    "\n",
    "            # 测试时的 summaries\n",
    "            test_loss_summary = tf.summary.scalar(\"loss\", test_dkt.loss)\n",
    "            dev_summary_op = tf.summary.merge([test_loss_summary])\n",
    "            dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n",
    "            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n",
    "\n",
    "            saver = tf.train.Saver(tf.global_variables())\n",
    "\n",
    "            sess.run(tf.global_variables_initializer())\n",
    "\n",
    "            print(\"初始化完毕，开始训练\")\n",
    "            for i in range(config.trainConfig.epochs):\n",
    "                np.random.shuffle(train_seqs)\n",
    "                for params in dataGen.next_batch(train_seqs):\n",
    "                    # 批次获得训练集，训练模型\n",
    "                    self.train_step(params, train_op, train_summary_op, train_summary_writer)\n",
    "\n",
    "                    current_step = tf.train.global_step(sess, global_step)\n",
    "                    # train_step.run(feed_dict={x: batch_train[0], y_actual: batch_train[1], keep_prob: 0.5})\n",
    "                    # 对结果进行记录\n",
    "                    if current_step % config.trainConfig.evaluate_every == 0:\n",
    "                        print(\"\\nEvaluation:\")\n",
    "                        # 获得测试数据\n",
    "\n",
    "                        losses = []\n",
    "                        accuracys = []\n",
    "                        aucs = []\n",
    "                        for params in dataGen.next_batch(test_seqs):\n",
    "                            loss, accuracy, auc = self.dev_step(params, dev_summary_op, writer=None)\n",
    "                            losses.append(loss)\n",
    "                            accuracys.append(accuracy)\n",
    "                            aucs.append(auc)\n",
    "\n",
    "                        time_str = datetime.datetime.now().isoformat()\n",
    "                        print(\"dev: {}, step: {}, loss: {}, acc: {}, auc: {}\".\n",
    "                              format(time_str, current_step, mean(losses), mean(accuracys), mean(aucs)))\n",
    "\n",
    "                    if current_step % config.trainConfig.checkpoint_every == 0:\n",
    "                        path = saver.save(sess, \"model/my-model\", global_step=current_step)\n",
    "                        print(\"Saved model checkpoint to {}\\n\".format(path))\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Train the DKT model on the knowledge-tracing dataset.\n",
    "    csv_path = \"./data/knowledgeTracing.csv\"\n",
    "    engine = DKTEngine()\n",
    "    engine.run_epoch(csv_path)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step: 1\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.651437901683 0.630281690141\n",
      "step: 2\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.852083988143 0.892568659128\n",
      "step: 3\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.821706453103 0.947848761408\n",
      "step: 4\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.761215150662 0.814510097233\n",
      "step: 5\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.714353598734 0.834129692833\n",
      "step: 6\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.828215200101 0.784359093012\n",
      "step: 7\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.835155906546 0.773483365949\n",
      "step: 8\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.811198237741 0.750379362671\n",
      "step: 9\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.875720174433 0.840540540541\n",
      "step: 10\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.828990439151 0.830357142857\n",
      "step: 11\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.834634722666 0.830258302583\n",
      "step: 12\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.887835178915 0.847102342787\n",
      "step: 13\n",
      "INFO:tensorflow:Restoring parameters from model/my-model-800\n",
      "0.885534239058 0.827205882353\n",
      "inference  auc: 0.8144677839180899  acc: 0.815617302576507\n"
     ]
    }
   ],
   "source": [
    "# Model inference: restore the trained checkpoint and score the test split\n",
    "def load_model(fileName):\n",
    "    \"\"\"\n",
    "    Restores the latest checkpoint and reports mean AUC/accuracy on the test set.\n",
    "\n",
    "    :param fileName: path to the knowledge-tracing CSV file\n",
    "    \"\"\"\n",
    "    # instantiate the configuration object\n",
    "    config = Config()\n",
    "\n",
    "    # build the data generator and its train/test split\n",
    "    dataGen = DataGenerator(fileName, config)\n",
    "    dataGen.gen_attr()\n",
    "\n",
    "    test_seqs = dataGen.test_seqs\n",
    "\n",
    "    with tf.Session() as sess:\n",
    "        # Import the graph and restore the weights ONCE. The original code did\n",
    "        # this inside the batch loop, re-adding the entire graph and re-running\n",
    "        # the restore on every single batch.\n",
    "        saver = tf.train.import_meta_graph(\"model/my-model-800.meta\")\n",
    "        saver.restore(sess, tf.train.latest_checkpoint(\"model/\"))\n",
    "\n",
    "        # grab the default graph that now holds the restored ops\n",
    "        graph = tf.get_default_graph()\n",
    "\n",
    "        # placeholders the restored prediction ops depend on\n",
    "        input_x = graph.get_operation_by_name(\"test/dkt/input_x\").outputs[0]\n",
    "        target_id = graph.get_operation_by_name(\"test/dkt/target_id\").outputs[0]\n",
    "        keep_prob = graph.get_operation_by_name(\"test/dkt/keep_prob\").outputs[0]\n",
    "        max_steps = graph.get_operation_by_name(\"test/dkt/max_steps\").outputs[0]\n",
    "        sequence_len = graph.get_operation_by_name(\"test/dkt/sequence_len\").outputs[0]\n",
    "\n",
    "        # output tensors\n",
    "        pred_all = graph.get_tensor_by_name(\"test/dkt/pred_all:0\")\n",
    "        pred = graph.get_tensor_by_name(\"test/dkt/pred:0\")\n",
    "        binary_pred = graph.get_tensor_by_name(\"test/dkt/binary_pred:0\")\n",
    "\n",
    "        accuracys = []\n",
    "        aucs = []\n",
    "        step = 1\n",
    "\n",
    "        for params in dataGen.next_batch(test_seqs):\n",
    "            print(\"step: {}\".format(step))\n",
    "\n",
    "            target_correctness = params['target_correctness']\n",
    "            # fetch into distinct names so the tensor handles above are not\n",
    "            # overwritten with numpy arrays between iterations\n",
    "            pred_all_val, pred_val, binary_pred_val = sess.run(\n",
    "                [pred_all, pred, binary_pred],\n",
    "                feed_dict={input_x: params[\"input_x\"],\n",
    "                           target_id: params[\"target_id\"],\n",
    "                           keep_prob: 1.0,\n",
    "                           max_steps: params[\"max_len\"],\n",
    "                           sequence_len: params[\"seq_len\"]})\n",
    "\n",
    "            auc, acc = gen_metrics(params[\"seq_len\"], binary_pred_val, pred_val, target_correctness)\n",
    "            print(auc, acc)\n",
    "            accuracys.append(acc)\n",
    "            aucs.append(auc)\n",
    "            step += 1\n",
    "\n",
    "        # `mean` was undefined in the original (only numpy is imported, as np)\n",
    "        aucMean = np.mean(aucs)\n",
    "        accMean = np.mean(accuracys)\n",
    "\n",
    "        print(\"inference  auc: {}  acc: {}\".format(aucMean, accMean))\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Run checkpoint-based inference on the knowledge-tracing dataset.\n",
    "    csv_path = \"./data/knowledgeTracing.csv\"\n",
    "    load_model(csv_path)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
