{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T06:52:07.416929Z",
     "start_time": "2019-08-18T06:52:07.400270Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['set_series_1.csv',\n",
       " 'app_info.csv',\n",
       " 'age_train.csv',\n",
       " 'user_basic_info.csv',\n",
       " 'user_app_actived.csv',\n",
       " 'age_test.csv',\n",
       " 'user_behavior_info.csv',\n",
       " 'user_app_usage.csv',\n",
       " 'times_usage_appid.csv']"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 导入相关库\n",
    "import os\n",
    "import pandas as pd\n",
    "from tqdm.autonotebook import *\n",
    "from sklearn.decomposition import LatentDirichletAllocation\n",
    "from sklearn.metrics import accuracy_score\n",
    "import time\n",
    "from sklearn.feature_extraction.text import TfidfTransformer\n",
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from scipy.sparse import hstack\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from gensim.models import FastText, Word2Vec\n",
    "import re\n",
    "from keras.layers import *\n",
    "from keras.models import *\n",
    "from keras.preprocessing.text import Tokenizer, text_to_word_sequence\n",
    "from keras.preprocessing.sequence import pad_sequences\n",
    "from keras.preprocessing import text, sequence\n",
    "from keras.callbacks import *\n",
    "from keras.layers.advanced_activations import LeakyReLU, PReLU\n",
    "import keras.backend as K\n",
    "from keras.optimizers import *\n",
    "from keras.utils import to_categorical\n",
    "import tensorflow as tf\n",
    "import random as rn\n",
    "import gc\n",
    "import logging\n",
    "import gensim\n",
    "import jieba\n",
    "os.environ['PYTHONHASHSEED'] = '0'\n",
    "# 显卡使用（如没显卡需要注释掉）\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = \"3\"\n",
    "np.random.seed(1024)\n",
    "rn.seed(1024)\n",
    "tf.set_random_seed(1024)\n",
    "path=\"data/\"\n",
    "os.listdir(\"data/\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T05:47:24.508068Z",
     "start_time": "2019-08-18T05:47:24.496886Z"
    }
   },
   "outputs": [],
   "source": [
    "# 读入数据（需加速）\n",
    "def get_age_data():\n",
    "    train_data = pd.read_csv(path + 'age_train.csv', header=None)\n",
    "    test_data = pd.read_csv(path + 'age_test.csv', header=None)\n",
    "    data = pd.concat([train_data, test_data], axis=0, sort=False).fillna(-1)\n",
    "    data.columns = ['uId', 'age_group']\n",
    "    return data\n",
    "\n",
    "def get_user_app_actived():\n",
    "    data = pd.read_csv(path + 'user_app_actived.csv', header=None)\n",
    "    data.columns = ['uId', 'appId']\n",
    "    return data\n",
    "\n",
    "def get_user_behavior_info():\n",
    "    data = pd.read_csv(path + 'user_behavior_info.csv', header=None)\n",
    "    data.columns = ['uId', 'bootTimes', 'AFuncTimes', 'BFuncTimes', 'CFuncTimes',\n",
    "                   'DFuncTimes', 'EFuncTimes', 'FFuncTimes', 'FFuncSum']\n",
    "    return data\n",
    "\n",
    "def get_user_basic_info():\n",
    "    data = pd.read_csv(path + 'user_basic_info.csv', header=None)\n",
    "    data.columns = ['uId', 'gender', 'city', 'prodName', 'ramCapacity', \n",
    "                   'ramLeftRation', 'romCapacity', 'romLeftRation', 'color',\n",
    "                   'fontSize', 'ct', 'carrier', 'os']\n",
    "    return data\n",
    "\n",
    "def get_app_info():\n",
    "    data = pd.read_csv(path + 'app_info.csv', header=None)\n",
    "    data.columns = ['appId', 'category']\n",
    "    return data\n",
    "\n",
    "# 测试的时候用True\n",
    "# 提特征改用False\n",
    "def get_user_app_usage(less_data=False):\n",
    "    if less_data:\n",
    "        reader = pd.read_csv(path + 'user_app_usage.csv', chunksize=2000000)\n",
    "        for i in reader:\n",
    "            data = i\n",
    "            break\n",
    "    else:\n",
    "        data = pd.read_csv(path + 'user_app_usage.csv', header=None)\n",
    "    data.columns = ['uId', 'appId', 'duration', 'times', 'use_date']\n",
    "    return data\n",
    "\n",
    "def get_time_usage():\n",
    "    data = pd.read_csv(path + 'set_series_1.csv', header=None)\n",
    "    data.columns = ['uId', 'timesUsage']\n",
    "    return data\n",
    "\n",
    "def get_usage_set():\n",
    "    data = pd.read_csv(path + 'nurbs_set.csv', header=None)\n",
    "    data.columns = ['uId', 'timeCount']\n",
    "    return data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T06:39:41.644580Z",
     "start_time": "2019-08-18T06:39:41.561682Z"
    }
   },
   "outputs": [],
   "source": [
     "# Helper classes used below: AdamW / RAdam optimizers and an attention layer\n",
     "class AdamW(Optimizer):\n",
     "    \"\"\"Adam variant with decoupled weight decay.\n",
     "\n",
     "    Besides the standard Adam step, each parameter update additionally\n",
     "    subtracts `lr * weight_decay * p` (the '(4/4)' line below), i.e.\n",
     "    decoupled weight decay in the style of AdamW.\n",
     "    \"\"\"\n",
     "    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, weight_decay=1e-4,  # decoupled weight decay (1/4)\n",
     "                 epsilon=1e-8, decay=0., **kwargs):\n",
     "        super(AdamW, self).__init__(**kwargs)\n",
     "        with K.name_scope(self.__class__.__name__):\n",
     "            self.iterations = K.variable(0, dtype='int64', name='iterations')\n",
     "            self.lr = K.variable(lr, name='lr')\n",
     "            self.beta_1 = K.variable(beta_1, name='beta_1')\n",
     "            self.beta_2 = K.variable(beta_2, name='beta_2')\n",
     "            self.decay = K.variable(decay, name='decay')\n",
     "            # decoupled weight decay (2/4)\n",
     "            self.wd = K.variable(weight_decay, name='weight_decay')\n",
     "        self.epsilon = epsilon\n",
     "        self.initial_decay = decay\n",
     "\n",
     "    @interfaces.legacy_get_updates_support\n",
     "    def get_updates(self, loss, params):\n",
     "        \"\"\"Build the symbolic update ops for one optimization step.\"\"\"\n",
     "        grads = self.get_gradients(loss, params)\n",
     "        self.updates = [K.update_add(self.iterations, 1)]\n",
     "        wd = self.wd  # decoupled weight decay (3/4)\n",
     "\n",
     "        lr = self.lr\n",
     "        # optional time-based learning-rate decay\n",
     "        if self.initial_decay > 0:\n",
     "            lr *= (1. / (1. + self.decay * K.cast(self.iterations,\n",
     "                                                  K.dtype(self.decay))))\n",
     "\n",
     "        # bias-corrected learning rate for step t (standard Adam correction)\n",
     "        t = K.cast(self.iterations, K.floatx()) + 1\n",
     "        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /\n",
     "                     (1. - K.pow(self.beta_1, t)))\n",
     "\n",
     "        # first/second moment accumulators, one per parameter\n",
     "        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n",
     "        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n",
     "        self.weights = [self.iterations] + ms + vs\n",
     "\n",
     "        for p, g, m, v in zip(params, grads, ms, vs):\n",
     "            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n",
     "            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)\n",
     "            # decoupled weight decay (4/4)\n",
     "            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p\n",
     "\n",
     "            self.updates.append(K.update(m, m_t))\n",
     "            self.updates.append(K.update(v, v_t))\n",
     "            new_p = p_t\n",
     "\n",
     "            # Apply constraints.\n",
     "            if getattr(p, 'constraint', None) is not None:\n",
     "                new_p = p.constraint(new_p)\n",
     "\n",
     "            self.updates.append(K.update(p, new_p))\n",
     "        return self.updates\n",
     "\n",
     "    def get_config(self):\n",
     "        \"\"\"Return the optimizer hyperparameters for serialization.\"\"\"\n",
     "        config = {'lr': float(K.get_value(self.lr)),\n",
     "                  'beta_1': float(K.get_value(self.beta_1)),\n",
     "                  'beta_2': float(K.get_value(self.beta_2)),\n",
     "                  'decay': float(K.get_value(self.decay)),\n",
     "                  'weight_decay': float(K.get_value(self.wd)),\n",
     "                  'epsilon': self.epsilon}\n",
     "        base_config = super(AdamW, self).get_config()\n",
     "        return dict(list(base_config.items()) + list(config.items()))\n",
    "\n",
    "\n",
     "from keras.engine.topology import Layer\n",
     "class Attention(Layer):\n",
     "    \"\"\"Additive attention pooling over the time dimension.\n",
     "\n",
     "    Scores each of the `step_dim` timesteps with tanh(x . W (+ b)),\n",
     "    softmax-normalizes the scores (respecting an input mask if given),\n",
     "    and returns the attention-weighted sum over timesteps, collapsing\n",
     "    (batch, step_dim, features) -> (batch, features).\n",
     "    \"\"\"\n",
     "    def __init__(self, step_dim,\n",
     "                 W_regularizer=None, b_regularizer=None,\n",
     "                 W_constraint=None, b_constraint=None,\n",
     "                 bias=True, **kwargs):\n",
     "        self.supports_masking = True\n",
     "        self.init = initializers.get('glorot_uniform')\n",
     "\n",
     "        self.W_regularizer = regularizers.get(W_regularizer)\n",
     "        self.b_regularizer = regularizers.get(b_regularizer)\n",
     "\n",
     "        self.W_constraint = constraints.get(W_constraint)\n",
     "        self.b_constraint = constraints.get(b_constraint)\n",
     "\n",
     "        self.bias = bias\n",
     "        self.step_dim = step_dim\n",
     "        # features_dim is filled in by build() from the input shape\n",
     "        self.features_dim = 0\n",
     "        super(Attention, self).__init__(**kwargs)\n",
     "\n",
     "    def build(self, input_shape):\n",
     "        # expects (batch, steps, features)\n",
     "        assert len(input_shape) == 3\n",
     "\n",
     "        self.W = self.add_weight((input_shape[-1],),\n",
     "                                 initializer=self.init,\n",
     "                                 name='{}_W'.format(self.name),\n",
     "                                 regularizer=self.W_regularizer,\n",
     "                                 constraint=self.W_constraint)\n",
     "        self.features_dim = input_shape[-1]\n",
     "\n",
     "        if self.bias:\n",
     "            # one bias per timestep\n",
     "            self.b = self.add_weight((input_shape[1],),\n",
     "                                     initializer='zero',\n",
     "                                     name='{}_b'.format(self.name),\n",
     "                                     regularizer=self.b_regularizer,\n",
     "                                     constraint=self.b_constraint)\n",
     "        else:\n",
     "            self.b = None\n",
     "\n",
     "        self.built = True\n",
     "\n",
     "    def compute_mask(self, input, input_mask=None):\n",
     "        # the mask is consumed in call(); do not propagate it downstream\n",
     "        return None\n",
     "\n",
     "    def call(self, x, mask=None):\n",
     "        features_dim = self.features_dim\n",
     "        step_dim = self.step_dim\n",
     "\n",
     "        # unnormalized attention scores, shape (batch, step_dim)\n",
     "        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),\n",
     "                        K.reshape(self.W, (features_dim, 1))), (-1, step_dim))\n",
     "\n",
     "        if self.bias:\n",
     "            eij += self.b\n",
     "\n",
     "        eij = K.tanh(eij)\n",
     "\n",
     "        a = K.exp(eij)\n",
     "\n",
     "        # zero out masked timesteps before normalizing\n",
     "        if mask is not None:\n",
     "            a *= K.cast(mask, K.floatx())\n",
     "\n",
     "        # softmax over timesteps; K.epsilon() guards against division by zero\n",
     "        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n",
     "\n",
     "        a = K.expand_dims(a)\n",
     "        weighted_input = x * a\n",
     "        return K.sum(weighted_input, axis=1)\n",
     "\n",
     "    def compute_output_shape(self, input_shape):\n",
     "        return input_shape[0],  self.features_dim\n",
    "import keras\n",
    "class RAdam(keras.optimizers.Optimizer):\n",
    "    \"\"\"RAdam optimizer.\n",
    "    # Arguments\n",
    "        lr: float >= 0. Learning rate.\n",
    "        beta_1: float, 0 < beta < 1. Generally close to 1.\n",
    "        beta_2: float, 0 < beta < 1. Generally close to 1.\n",
    "        epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.\n",
    "        decay: float >= 0. Learning rate decay over each update.\n",
    "        weight_decay: float >= 0. Weight decay for each param.\n",
    "        amsgrad: boolean. Whether to apply the AMSGrad variant of this\n",
    "            algorithm from the paper \"On the Convergence of Adam and\n",
    "            Beyond\".\n",
    "        total_steps: int >= 0. Total number of training steps. Enable warmup by setting a positive value.\n",
    "        warmup_proportion: 0 < warmup_proportion < 1. The proportion of increasing steps.\n",
    "        min_lr: float >= 0. Minimum learning rate after warmup.\n",
    "    # References\n",
    "        - [Adam - A Method for Stochastic Optimization](https://arxiv.org/abs/1412.6980v8)\n",
    "        - [On the Convergence of Adam and Beyond](https://openreview.net/forum?id=ryQu7f-RZ)\n",
    "        - [On The Variance Of The Adaptive Learning Rate And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,\n",
    "                 epsilon=None, decay=0., weight_decay=0., amsgrad=False,\n",
    "                 total_steps=0, warmup_proportion=0.1, min_lr=0., **kwargs):\n",
    "        super(RAdam, self).__init__(**kwargs)\n",
    "        with K.name_scope(self.__class__.__name__):\n",
    "            self.iterations = K.variable(0, dtype='int64', name='iterations')\n",
    "            self.lr = K.variable(lr, name='lr')\n",
    "            self.beta_1 = K.variable(beta_1, name='beta_1')\n",
    "            self.beta_2 = K.variable(beta_2, name='beta_2')\n",
    "            self.decay = K.variable(decay, name='decay')\n",
    "            self.weight_decay = K.variable(weight_decay, name='weight_decay')\n",
    "            self.total_steps = K.variable(total_steps, name='total_steps')\n",
    "            self.warmup_proportion = K.variable(warmup_proportion, name='warmup_proportion')\n",
    "            self.min_lr = K.variable(lr, name='min_lr')\n",
    "        if epsilon is None:\n",
    "            epsilon = K.epsilon()\n",
    "        self.epsilon = epsilon\n",
    "        self.initial_decay = decay\n",
    "        self.initial_weight_decay = weight_decay\n",
    "        self.initial_total_steps = total_steps\n",
    "        self.amsgrad = amsgrad\n",
    "\n",
    "    def get_updates(self, loss, params):\n",
    "        grads = self.get_gradients(loss, params)\n",
    "        self.updates = [K.update_add(self.iterations, 1)]\n",
    "\n",
    "        lr = self.lr\n",
    "\n",
    "        if self.initial_decay > 0:\n",
    "            lr = lr * (1. / (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay))))\n",
    "\n",
    "        t = K.cast(self.iterations, K.floatx()) + 1\n",
    "\n",
    "        if self.initial_total_steps > 0:\n",
    "            warmup_steps = self.total_steps * self.warmup_proportion\n",
    "            decay_steps = self.total_steps - warmup_steps\n",
    "            lr = K.switch(\n",
    "                t <= warmup_steps,\n",
    "                lr * (t / warmup_steps),\n",
    "                lr * (1.0 - K.minimum(t, decay_steps) / decay_steps),\n",
    "            )\n",
    "\n",
    "        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='m_' + str(i)) for (i, p) in enumerate(params)]\n",
    "        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='v_' + str(i)) for (i, p) in enumerate(params)]\n",
    "\n",
    "        if self.amsgrad:\n",
    "            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='vhat_' + str(i)) for (i, p) in enumerate(params)]\n",
    "        else:\n",
    "            vhats = [K.zeros(1, name='vhat_' + str(i)) for i in range(len(params))]\n",
    "\n",
    "        self.weights = [self.iterations] + ms + vs + vhats\n",
    "\n",
    "        beta_1_t = K.pow(self.beta_1, t)\n",
    "        beta_2_t = K.pow(self.beta_2, t)\n",
    "\n",
    "        sma_inf = 2.0 / (1.0 - self.beta_2) - 1.0\n",
    "        sma_t = sma_inf - 2.0 * t * beta_2_t / (1.0 - beta_2_t)\n",
    "\n",
    "        for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):\n",
    "            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n",
    "            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)\n",
    "\n",
    "            m_corr_t = m_t / (1.0 - beta_1_t)\n",
    "            if self.amsgrad:\n",
    "                vhat_t = K.maximum(vhat, v_t)\n",
    "                v_corr_t = K.sqrt(vhat_t / (1.0 - beta_2_t) + self.epsilon)\n",
    "                self.updates.append(K.update(vhat, vhat_t))\n",
    "            else:\n",
    "                v_corr_t = K.sqrt(v_t / (1.0 - beta_2_t) + self.epsilon)\n",
    "\n",
    "            r_t = K.sqrt((sma_t - 4.0) / (sma_inf - 4.0) *\n",
    "                         (sma_t - 2.0) / (sma_inf - 2.0) *\n",
    "                         sma_inf / sma_t)\n",
    "\n",
    "            p_t = K.switch(sma_t > 5, r_t * m_corr_t / v_corr_t, m_corr_t)\n",
    "\n",
    "            if self.initial_weight_decay > 0:\n",
    "                p_t += self.weight_decay * p\n",
    "\n",
    "            p_t = p - lr * p_t\n",
    "\n",
    "            self.updates.append(K.update(m, m_t))\n",
    "            self.updates.append(K.update(v, v_t))\n",
    "            new_p = p_t\n",
    "\n",
    "            # Apply constraints.\n",
    "            if getattr(p, 'constraint', None) is not None:\n",
    "                new_p = p.constraint(new_p)\n",
    "\n",
    "            self.updates.append(K.update(p, new_p))\n",
    "        return self.updates\n",
    "\n",
    "    def get_config(self):\n",
    "        config = {\n",
    "            'lr': float(K.get_value(self.lr)),\n",
    "            'beta_1': float(K.get_value(self.beta_1)),\n",
    "            'beta_2': float(K.get_value(self.beta_2)),\n",
    "            'decay': float(K.get_value(self.decay)),\n",
    "            'weight_decay': float(K.get_value(self.weight_decay)),\n",
    "            'epsilon': self.epsilon,\n",
    "            'amsgrad': self.amsgrad,\n",
    "            'total_steps': float(K.get_value(self.total_steps)),\n",
    "            'warmup_proportion': float(K.get_value(self.warmup_proportion)),\n",
    "            'min_lr': float(K.get_value(self.min_lr)),\n",
    "        }\n",
    "        base_config = super(RAdam, self).get_config()\n",
    "        return dict(list(base_config.items()) + list(config.items()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T05:50:16.415108Z",
     "start_time": "2019-08-18T05:47:43.096803Z"
    },
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>uId</th>\n",
       "      <th>age_group</th>\n",
       "      <th>appId</th>\n",
       "      <th>appInfo</th>\n",
       "      <th>timesUsage</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1000001</td>\n",
       "      <td>4.0</td>\n",
       "      <td>a00140327#a00170298#a00184278#a00187480#a00239...</td>\n",
       "      <td>汽车 实用工具 实用工具 影音娱乐 汽车 未知 便捷生活 实用工具 实用工具 实用工具 拍摄...</td>\n",
       "      <td>a00289791 a00289826 a00290038 a00290037 a00276...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1000011</td>\n",
       "      <td>3.0</td>\n",
       "      <td>a00158535#a00163116#a00170432#a00187480#a00224...</td>\n",
       "      <td>实用工具 新闻阅读 购物比价 影音娱乐 角色扮演 影音娱乐 购物比价 社交通讯 社交通讯 新...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1000015</td>\n",
       "      <td>5.0</td>\n",
       "      <td>a00109386#a00170432#a0021880#a00244790#a002475...</td>\n",
       "      <td>教育 购物比价 新闻阅读 未知 影音娱乐 拍摄美化 实用工具 社交通讯 社交通讯 实用工具 ...</td>\n",
       "      <td>a00289791 a00289826 a00278905 a00247519 a00289...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1000019</td>\n",
       "      <td>3.0</td>\n",
       "      <td>a00157060#a00274701</td>\n",
       "      <td>实用工具 拍摄美化</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1000023</td>\n",
       "      <td>2.0</td>\n",
       "      <td>a00170298#a00203358#a00275200#a00278905#a00289...</td>\n",
       "      <td>实用工具 社交通讯 实用工具 拍摄美化 实用工具 社交通讯 社交通讯 影音娱乐 影音娱乐 实...</td>\n",
       "      <td>a00278905 a00170298 a00289791 a00290038 a00289...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       uId  age_group                                              appId  \\\n",
       "0  1000001        4.0  a00140327#a00170298#a00184278#a00187480#a00239...   \n",
       "1  1000011        3.0  a00158535#a00163116#a00170432#a00187480#a00224...   \n",
       "2  1000015        5.0  a00109386#a00170432#a0021880#a00244790#a002475...   \n",
       "3  1000019        3.0                                a00157060#a00274701   \n",
       "4  1000023        2.0  a00170298#a00203358#a00275200#a00278905#a00289...   \n",
       "\n",
       "                                             appInfo  \\\n",
       "0  汽车 实用工具 实用工具 影音娱乐 汽车 未知 便捷生活 实用工具 实用工具 实用工具 拍摄...   \n",
       "1  实用工具 新闻阅读 购物比价 影音娱乐 角色扮演 影音娱乐 购物比价 社交通讯 社交通讯 新...   \n",
       "2  教育 购物比价 新闻阅读 未知 影音娱乐 拍摄美化 实用工具 社交通讯 社交通讯 实用工具 ...   \n",
       "3                                         实用工具 拍摄美化    \n",
       "4  实用工具 社交通讯 实用工具 拍摄美化 实用工具 社交通讯 社交通讯 影音娱乐 影音娱乐 实...   \n",
       "\n",
       "                                          timesUsage  \n",
       "0  a00289791 a00289826 a00290038 a00290037 a00276...  \n",
       "1                                                  0  \n",
       "2  a00289791 a00289826 a00278905 a00247519 a00289...  \n",
       "3                                                  0  \n",
       "4  a00278905 a00170298 a00289791 a00290038 a00289...  "
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "### 读入数据(想要五输入)\n",
    "id_label = get_age_data()\n",
    "active_data = get_user_app_actived()\n",
    "data = pd.merge(id_label, active_data, on='uId', how='left')\n",
    "data_info = get_app_info()\n",
    "cat_dict = dict(zip(data_info['appId'], data_info['category']))\n",
    "def get_review_data(row):\n",
    "    review = ''\n",
    "    app_list = row['appId'].split('#')\n",
    "    for i in app_list:\n",
    "        try:\n",
    "            review += cat_dict[i] + ' '\n",
    "        except Exception:\n",
    "            review += '未知' + ' ' \n",
    "    return review\n",
    "data['appInfo'] = data.apply(lambda row:get_review_data(row), axis=1)\n",
    "time_usage_data = get_time_usage()\n",
    "data = pd.merge(data, time_usage_data, on='uId', how='left')\n",
    "data = data.fillna('未知')\n",
    "data.head(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T05:50:16.461161Z",
     "start_time": "2019-08-18T05:50:16.416938Z"
    }
   },
   "outputs": [],
   "source": [
     "# Free large intermediates no longer needed (kernel memory persists across\n",
     "# cells); the merged `data` frame keeps everything required downstream\n",
     "del id_label, active_data, data_info, cat_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T05:50:16.475321Z",
     "start_time": "2019-08-18T05:50:16.465137Z"
    }
   },
   "outputs": [],
   "source": [
    "### Tokenizer 序列化文本\n",
    "def set_tokenizer(docs, split_char=' ', max_len=100):\n",
    "    '''\n",
    "    输入\n",
    "    docs:文本列表\n",
    "    split_char:按什么字符切割\n",
    "    max_len:截取的最大长度\n",
    "    \n",
    "    输出\n",
    "    X:序列化后的数据\n",
    "    word_index:文本和数字对应的索引\n",
    "    '''\n",
    "    tokenizer = Tokenizer(lower=False, char_level=False, split=split_char)\n",
    "    tokenizer.fit_on_texts(docs)\n",
    "    X = tokenizer.texts_to_sequences(docs)\n",
    "    maxlen = max_len\n",
    "    X = pad_sequences(X, maxlen=maxlen, value=0)\n",
    "    word_index=tokenizer.word_index\n",
    "    return X, word_index\n",
    "\n",
    "### 做embedding 这里采用word2vec 可以换成其他例如（glove词向量）\n",
    "def trian_save_word2vec(docs, embed_size=300, save_name='w2v.txt', split_char=' '):\n",
    "    '''\n",
    "    输入\n",
    "    docs:输入的文本列表\n",
    "    embed_size:embed长度\n",
    "    save_name:保存的word2vec位置\n",
    "    \n",
    "    输出\n",
    "    w2v:返回的模型\n",
    "    '''\n",
    "    input_docs = []\n",
    "    for i in docs:\n",
    "        input_docs.append(i.split(split_char))\n",
    "    logging.basicConfig(\n",
    "    format='%(asctime)s:%(levelname)s:%(message)s', level=logging.INFO)\n",
    "    w2v = Word2Vec(input_docs, size=embed_size, sg=1, window=8, seed=1017, workers=24, min_count=1, iter=10)\n",
    "    w2v.wv.save_word2vec_format(save_name)\n",
    "    print(\"w2v model done\")\n",
    "    return w2v\n",
    "\n",
    "# 得到embedding矩阵\n",
    "def get_embedding_matrix(word_index, embed_size=300, Emed_path=\"w2v_300.txt\"):\n",
    "    embeddings_index = gensim.models.KeyedVectors.load_word2vec_format(\n",
    "        Emed_path, binary=False)\n",
    "    nb_words = len(word_index)+1\n",
    "    embedding_matrix = np.zeros((nb_words, embed_size))\n",
    "    count = 0\n",
    "    for word, i in tqdm(word_index.items()):\n",
    "        if i >= nb_words:\n",
    "            continue\n",
    "        try:\n",
    "            embedding_vector = embeddings_index[word]\n",
    "        except:\n",
    "            embedding_vector = np.zeros(embed_size)\n",
    "            count += 1\n",
    "        if embedding_vector is not None:\n",
    "            embedding_matrix[i] = embedding_vector    \n",
    "    print(\"null cnt\",count)\n",
    "    return embedding_matrix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T06:06:34.244685Z",
     "start_time": "2019-08-18T05:50:16.478637Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始序列化\n",
      "序列化完成\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6d7aa1641fb14169b46c9082ddd0ed3f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=9401), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "null cnt 1\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "5685cdb8248a419e87e743079d30ca96",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=488125), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "null cnt 0\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Sequence lengths: appId is capped at 90 tokens, timesUsage at 600\n",
     "text_1_list = list(data['appId'])\n",
     "text_3_list = list(data['timesUsage'])\n",
     "\n",
     "# drop the raw text columns to free memory; the lists above keep the data\n",
     "del data['appId']\n",
     "del data['timesUsage']\n",
     "\n",
     "print('开始序列化')\n",
     "x1, index_1 = set_tokenizer(text_1_list, split_char='#', max_len=90)\n",
     "x3, index_3 = set_tokenizer(text_3_list, split_char=' ', max_len=600)\n",
     "print('序列化完成')\n",
     "gc.collect()\n",
     "\n",
     "# NOTE: the vector files below were saved with w2v.wv.save_word2vec_format;\n",
     "# if you train your own model, re-save it with that function before\n",
     "# pointing Emed_path at the file.\n",
     "# trian_save_word2vec(text_1_list, save_name='w2v_model/cate_w2v_300.txt', split_char='#')\n",
     "# gc.collect()\n",
     "# trian_save_word2vec(text_3_list, save_name='w2v_model/w2v_300.txt', split_char=' ')\n",
     "# gc.collect()\n",
     "\n",
     "# Build the embedding matrices from the pre-trained vectors\n",
     "emb1 = get_embedding_matrix(index_1, Emed_path='w2v_model/cate_w2v_300.txt')\n",
     "emb3 = get_embedding_matrix(index_3, Emed_path='w2v_model/w2v_300.txt')\n",
     "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T06:06:51.324010Z",
     "start_time": "2019-08-18T06:06:34.250423Z"
    }
   },
   "outputs": [],
   "source": [
    "# 将feature作为输入进行处理(这个feature一定要替换掉nan与inf)\n",
    "# 这个feature的顺序一定要对应之前的feature的顺序，保证每条是对应的\n",
    "f1 = pd.read_csv('feature/f1.csv')\n",
    "f2 = pd.read_csv('feature/f2.csv')\n",
    "f3 = pd.read_csv('feature/f3.csv')\n",
    "f4 = pd.read_csv('feature/f4.csv')\n",
    "f5 = pd.read_csv('feature/f5.csv')\n",
    "\n",
    "feature = pd.concat([f1, f2, f3, f4, f5], axis=1, sort=False)\n",
    "feature = feature.fillna(-1)\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "ss=StandardScaler()\n",
    "ss.fit(feature)\n",
    "hin_feature = ss.transform(feature)\n",
    "num_feature_input = hin_feature.shape[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T06:06:51.465708Z",
     "start_time": "2019-08-18T06:06:51.329080Z"
    }
   },
   "outputs": [],
   "source": [
    "# 区分开train和valid,test\n",
    "# 这里是假设三输入\n",
    "train_data = data[data['age_group']!=-1]\n",
    "train_input_1 = x1[:len(train_data)]\n",
    "test_input_1 = x1[len(train_data):]\n",
    "train_input_3 = x3[:len(train_data)]\n",
    "test_input_3 = x3[len(train_data):]\n",
    "train_input_5 = hin_feature[:len(train_data)]\n",
    "test_input_5 = hin_feature[len(train_data):]\n",
    "label = to_categorical(train_data['age_group'] - 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-18T12:39:50.329088Z",
     "start_time": "2019-08-18T12:39:49.764834Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "41148"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from keras.initializers import *\n",
    "from keras.activations import *\n",
    "\n",
     "def unchanged_shape(input_shape):\n",
     "    \"Identity output_shape function for Lambda layers\"\n",
     "    return input_shape\n",
     "\n",
     "\n",
     "def substract(input_1, input_2):\n",
     "    \"Element-wise input_1 - input_2 (negate then Add; name keeps original typo)\"\n",
     "    neg_input_2 = Lambda(lambda x: -x, output_shape=unchanged_shape)(input_2)\n",
     "    out_ = Add()([input_1, neg_input_2])\n",
     "    return out_\n",
     "\n",
     "\n",
     "def submult(input_1, input_2):\n",
     "    \"Concatenate element-wise difference and product of the two inputs\"\n",
     "    mult = Multiply()([input_1, input_2])\n",
     "    sub = substract(input_1, input_2)\n",
     "    out_ = Concatenate()([sub, mult])\n",
     "    return out_\n",
     "\n",
     "\n",
     "def apply_multiple(input_, layers):\n",
     "    \"Apply each layer to the same input, then concatenate the results\"\n",
     "    if not len(layers) > 1:\n",
     "        raise ValueError('Layers list should contain more than 1 layer')\n",
     "    else:\n",
     "        agg_ = []\n",
     "        for layer in layers:\n",
     "            agg_.append(layer(input_))\n",
     "        out_ = Concatenate()(agg_)\n",
     "    return out_\n",
     "\n",
     "\n",
     "def time_distributed(input_, layers):\n",
     "    \"Apply a list of layers sequentially, each wrapped in TimeDistributed\"\n",
     "    out_ = []\n",
     "    node_ = input_\n",
     "    for layer_ in layers:\n",
     "        node_ = TimeDistributed(layer_)(node_)\n",
     "    out_ = node_\n",
     "    return out_\n",
     "\n",
     "\n",
     "def soft_attention_alignment(input_1, input_2):\n",
     "    \"Align two sequence representations with neural soft attention\"\n",
     "    # pairwise similarity scores between all timestep pairs\n",
     "    attention = Dot(axes=-1)([input_1, input_2])\n",
     "    # softmax over each direction yields the two sets of attention weights\n",
     "    w_att_1 = Lambda(lambda x: softmax(x, axis=1),\n",
     "                     output_shape=unchanged_shape)(attention)\n",
     "    w_att_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2),\n",
     "                                     output_shape=unchanged_shape)(attention))\n",
     "    in1_aligned = Dot(axes=1)([w_att_1, input_1])\n",
     "    in2_aligned = Dot(axes=1)([w_att_2, input_2])\n",
     "    return in1_aligned, in2_aligned\n",
    "\n",
    "\n",
    "def model_conv(emb1, emb3, num_feature_input):\n",
    "    '''\n",
    "    注意这个inputs\n",
    "    seq1、seq2分别是两个输入\n",
    "    hin是feature层输入\n",
    "    是否做emb可选可不选，\n",
    "    这个就是我们之前训练已经得到的用于embedding的（embedding_matrix1， embedding_matrix2）\n",
    "    '''\n",
    "    K.clear_session()\n",
    "\n",
    "    emb_layer_1 = Embedding(\n",
    "        input_dim=emb1.shape[0],\n",
    "        output_dim=emb1.shape[1],\n",
    "        weights=[emb1],\n",
    "        input_length=90,\n",
    "        trainable=False\n",
    "    )\n",
    "    \n",
    "    emb_layer_3 = Embedding(\n",
    "        input_dim=emb3.shape[0],\n",
    "        output_dim=emb3.shape[1],\n",
    "        weights=[emb3],\n",
    "        input_length=600,\n",
    "        trainable=False\n",
    "    )\n",
    "    \n",
    "    \n",
    "    seq1 = Input(shape=(90,))\n",
    "    seq3 = Input(shape=(600,))    \n",
    "    \n",
    "    x1 = emb_layer_1(seq1)\n",
    "    x3 = emb_layer_3(seq3)\n",
    "    \n",
    "    sdrop=SpatialDropout1D(rate=0.2)\n",
    "    lstm_layer = Bidirectional(CuDNNLSTM(180, return_sequences=True, \n",
    "    kernel_initializer=glorot_uniform(seed = 123)))\n",
    "    gru_layer = Bidirectional(CuDNNGRU(100, return_sequences=True, \n",
    "    kernel_initializer=glorot_uniform(seed = 123)))\n",
    "    cnn1d_layer=keras.layers.Conv1D(64, kernel_size=2, padding=\"valid\", kernel_initializer=\"he_uniform\")\n",
    "    \n",
    "    x1 = sdrop(x1)\n",
    "    lstm1 = lstm_layer(x1)\n",
    "    gru1 = gru_layer(lstm1)\n",
    "    att_1 = Attention(90)(lstm1)\n",
    "    att_3 = Attention(90)(gru1)\n",
    "    cnn1 = cnn1d_layer(lstm1)\n",
    "    \n",
    "    avg_pool = GlobalAveragePooling1D()\n",
    "    max_pool = GlobalMaxPooling1D()\n",
    "    \n",
    "    x2 = sdrop(x3)\n",
    "    lstm2 = lstm_layer(x2)\n",
    "    gru2 = gru_layer(lstm2)\n",
    "    att_2 = Attention(600)(lstm2)\n",
    "    att_4 = Attention(600)(gru2)\n",
    "    cnn2 = cnn1d_layer(lstm2)\n",
    "    \n",
    "    x1=concatenate([att_1,att_3,avg_pool(cnn1),max_pool(cnn1),avg_pool(gru1),max_pool(gru1)])\n",
    "    x2=concatenate([att_2,att_4,avg_pool(cnn2),max_pool(cnn2),avg_pool(gru2),max_pool(gru2)])\n",
    "    \n",
    "    hin = Input(shape=(num_feature_input, ))\n",
    "    htime = Dense(16, activation='relu')(hin)\n",
    "    \n",
    "    x = concatenate([x1, x2, htime])\n",
    "    \n",
    "    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(x))))\n",
    "    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n",
    "    pred = Dense(6, activation='softmax')(x)\n",
    "    model = Model(inputs=[seq1, seq3, hin], outputs=pred)\n",
    "#     from keras.utils import multi_gpu_model\n",
    "#     model = multi_gpu_model(model, 2)\n",
    "    model.compile(loss='categorical_crossentropy',\n",
    "                  optimizer=RAdam(lr=0.001,weight_decay=0.08,),metrics=[\"accuracy\"])\n",
    "    return model\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2019-08-18T12:39:50.472Z"
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "FOLD |  1\n",
      "#########################################################################################################\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_1 (InputLayer)            (None, 90)           0                                            \n",
      "__________________________________________________________________________________________________\n",
      "input_2 (InputLayer)            (None, 600)          0                                            \n",
      "__________________________________________________________________________________________________\n",
      "embedding_1 (Embedding)         (None, 90, 300)      2820600     input_1[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "embedding_2 (Embedding)         (None, 600, 300)     146437800   input_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "spatial_dropout1d_1 (SpatialDro multiple             0           embedding_1[0][0]                \n",
      "                                                                 embedding_2[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional_1 (Bidirectional) multiple             694080      spatial_dropout1d_1[0][0]        \n",
      "                                                                 spatial_dropout1d_1[1][0]        \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional_2 (Bidirectional) multiple             277200      bidirectional_1[0][0]            \n",
      "                                                                 bidirectional_1[1][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv1d_1 (Conv1D)               multiple             46144       bidirectional_1[0][0]            \n",
      "                                                                 bidirectional_1[1][0]            \n",
      "__________________________________________________________________________________________________\n",
      "attention_1 (Attention)         (None, 360)          450         bidirectional_1[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "attention_2 (Attention)         (None, 200)          290         bidirectional_2[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "global_average_pooling1d_1 (Glo multiple             0           conv1d_1[0][0]                   \n",
      "                                                                 bidirectional_2[0][0]            \n",
      "                                                                 conv1d_1[1][0]                   \n",
      "                                                                 bidirectional_2[1][0]            \n",
      "__________________________________________________________________________________________________\n",
      "global_max_pooling1d_1 (GlobalM multiple             0           conv1d_1[0][0]                   \n",
      "                                                                 bidirectional_2[0][0]            \n",
      "                                                                 conv1d_1[1][0]                   \n",
      "                                                                 bidirectional_2[1][0]            \n",
      "__________________________________________________________________________________________________\n",
      "attention_3 (Attention)         (None, 360)          960         bidirectional_1[1][0]            \n",
      "__________________________________________________________________________________________________\n",
      "attention_4 (Attention)         (None, 200)          800         bidirectional_2[1][0]            \n",
      "__________________________________________________________________________________________________\n",
      "input_3 (InputLayer)            (None, 54)           0                                            \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_1 (Concatenate)     (None, 1088)         0           attention_1[0][0]                \n",
      "                                                                 attention_2[0][0]                \n",
      "                                                                 global_average_pooling1d_1[0][0] \n",
      "                                                                 global_max_pooling1d_1[0][0]     \n",
      "                                                                 global_average_pooling1d_1[1][0] \n",
      "                                                                 global_max_pooling1d_1[1][0]     \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_2 (Concatenate)     (None, 1088)         0           attention_3[0][0]                \n",
      "                                                                 attention_4[0][0]                \n",
      "                                                                 global_average_pooling1d_1[2][0] \n",
      "                                                                 global_max_pooling1d_1[2][0]     \n",
      "                                                                 global_average_pooling1d_1[3][0] \n",
      "                                                                 global_max_pooling1d_1[3][0]     \n",
      "__________________________________________________________________________________________________\n",
      "dense_1 (Dense)                 (None, 16)           880         input_3[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_3 (Concatenate)     (None, 2192)         0           concatenate_1[0][0]              \n",
      "                                                                 concatenate_2[0][0]              \n",
      "                                                                 dense_1[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_2 (Dense)                 (None, 1000)         2193000     concatenate_3[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_1 (BatchNor (None, 1000)         4000        dense_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "activation_1 (Activation)       (None, 1000)         0           batch_normalization_1[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "dropout_1 (Dropout)             (None, 1000)         0           activation_1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "dense_3 (Dense)                 (None, 500)          500500      dropout_1[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_2 (BatchNor (None, 500)          2000        dense_3[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "activation_2 (Activation)       (None, 500)          0           batch_normalization_2[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "dense_4 (Dense)                 (None, 6)            3006        activation_2[0][0]               \n",
      "==================================================================================================\n",
      "Total params: 152,981,710\n",
      "Trainable params: 3,720,310\n",
      "Non-trainable params: 149,261,400\n",
      "__________________________________________________________________________________________________\n",
      "Train on 1608000 samples, validate on 402000 samples\n",
      "Epoch 1/50\n",
      "1608000/1608000 [==============================] - 1297s 806us/step - loss: 1.0138 - acc: 0.5704 - val_loss: 0.9172 - val_acc: 0.6122\n",
      "\n",
      "Epoch 00001: val_acc improved from -inf to 0.61218, saving model to model/nn_v2_0.h5\n",
      "Epoch 2/50\n",
      " 844800/1608000 [==============>...............] - ETA: 9:26 - loss: 0.9173 - acc: 0.6129"
     ]
    }
   ],
   "source": [
    "skf = StratifiedKFold(n_splits=5, random_state=1017, shuffle=True)\n",
    "sub = np.zeros((test_input_5.shape[0], 6))\n",
    "oof_pred = np.zeros((train_input_5.shape[0], 6))\n",
    "score = []\n",
    "count = 0\n",
    "if not os.path.exists(\"model\"):\n",
    "    os.mkdir(\"model\")\n",
    "\n",
    "for i, (train_index, test_index) in enumerate(skf.split(train_input_5, train_data['age_group'])):\n",
    "    print(\"FOLD | \", count+1)\n",
    "    print(\"###\"*35)\n",
    "    gc.collect()\n",
    "    filepath = \"model/nn_v2_%d.h5\" % count\n",
    "    checkpoint = ModelCheckpoint(\n",
    "        filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max',save_weights_only=True)\n",
    "    reduce_lr = ReduceLROnPlateau(\n",
    "        monitor='val_acc', factor=0.5, patience=3, min_lr=0.0001, verbose=1)\n",
    "    earlystopping = EarlyStopping(\n",
    "        monitor='val_acc', min_delta=0.0001, patience=5, verbose=1, mode='max')\n",
    "    callbacks = [checkpoint, reduce_lr, earlystopping]\n",
    "    model_age = model_conv(emb1, emb3, num_feature_input)\n",
    "    if count==0:model_age.summary()\n",
    "    x1_tr, x1_va = np.array(train_input_1)[train_index], np.array(train_input_1)[test_index]    \n",
    "    x3_tr, x3_va = np.array(train_input_3)[train_index], np.array(train_input_3)[test_index]\n",
    "    x5_tr, x5_va = np.array(train_input_5)[train_index], np.array(train_input_5)[test_index]\n",
    "    y_tr, y_va = label[train_index], label[test_index]\n",
    "    \n",
    "    hist = model_age.fit([x1_tr, x3_tr, x5_tr],\n",
    "                         y_tr, batch_size=1024, epochs=50, \n",
    "                         validation_data=([x1_va, x3_va, x5_va], y_va),\n",
    "                         callbacks=callbacks, verbose=1, shuffle=True)\n",
    "\n",
    "    model_age.load_weights(filepath)\n",
    "    oof_pred[test_index] = model_age.predict([x1_va, x3_va, x5_va],batch_size=2048,verbose=1)\n",
    "    sub += model_age.predict([test_input_1, test_input_3, test_input_5],batch_size=2048,verbose=1)/skf.n_splits\n",
    "    score.append(np.max(hist.history['val_acc']))\n",
    "    count += 1\n",
    "print('acc:', np.mean(score))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2019-08-18T12:39:51.882Z"
    }
   },
   "outputs": [],
   "source": [
    "test = data[data['age_group'] == -1]\n",
    "submit = test[['uId']]\n",
    "submit.columns = ['id']\n",
    "submit['label'] = sub.argmax(1)+1\n",
    "if not os.path.exists(\"result\"): \n",
    "    os.mkdir(\"result\")\n",
    "submit.to_csv(\"./result/submission.csv\",index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2019-08-18T12:39:52.081Z"
    }
   },
   "outputs": [],
   "source": [
    "oof = np.concatenate((oof_pred,sub))\n",
    "oof = pd.DataFrame(oof)\n",
    "oof.columns = [str(i+1) for i in range(6)]\n",
    "oof['id'] = pd.concat([train_data[['uId']],test[['uId']]])['uId'].values\n",
    "oof.to_csv(\"./result/v2_test.csv\",index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-20T02:54:14.069981Z",
     "start_time": "2019-08-20T02:54:12.433422Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tue Aug 20 10:54:13 2019       \n",
      "+-----------------------------------------------------------------------------+\n",
      "| NVIDIA-SMI 410.104      Driver Version: 410.104      CUDA Version: 10.0     |\n",
      "|-------------------------------+----------------------+----------------------+\n",
      "| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n",
      "| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n",
      "|===============================+======================+======================|\n",
      "|   0  Tesla V100-DGXS...  Off  | 00000000:07:00.0  On |                    0 |\n",
      "| N/A   52C    P0   251W / 300W |  31051MiB / 32475MiB |     88%      Default |\n",
      "+-------------------------------+----------------------+----------------------+\n",
      "|   1  Tesla V100-DGXS...  Off  | 00000000:08:00.0 Off |                    0 |\n",
      "| N/A   45C    P0    41W / 300W |      0MiB / 32478MiB |      0%      Default |\n",
      "+-------------------------------+----------------------+----------------------+\n",
      "|   2  Tesla V100-DGXS...  Off  | 00000000:0E:00.0 Off |                    0 |\n",
      "| N/A   52C    P0   272W / 300W |  12181MiB / 32478MiB |     99%      Default |\n",
      "+-------------------------------+----------------------+----------------------+\n",
      "|   3  Tesla V100-DGXS...  Off  | 00000000:0F:00.0 Off |                    0 |\n",
      "| N/A   45C    P0    56W / 300W |  31307MiB / 32478MiB |      0%      Default |\n",
      "+-------------------------------+----------------------+----------------------+\n",
      "                                                                               \n",
      "+-----------------------------------------------------------------------------+\n",
      "| Processes:                                                       GPU Memory |\n",
      "|  GPU       PID   Type   Process name                             Usage      |\n",
      "|=============================================================================|\n",
      "|    0      1720      G   /usr/lib/xorg/Xorg                           173MiB |\n",
      "|    0      3587      G   compiz                                       170MiB |\n",
      "|    0     14002      C   python                                     30691MiB |\n",
      "|    2     12510      C   python                                     12169MiB |\n",
      "|    3      6318      C   ...din1/anaconda3/envs/aladdin1/bin/python 31291MiB |\n",
      "+-----------------------------------------------------------------------------+\n"
     ]
    }
   ],
   "source": [
    "!nvidia-smi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-08-20T02:56:15.377817Z",
     "start_time": "2019-08-20T02:56:15.373879Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "acc: 0.6475915422809658\n"
     ]
    }
   ],
   "source": [
    "print('acc:', np.mean(score))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
