{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import gc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Root directory for all input/output files used in this notebook.\n",
    "path = './target_data/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GBDT leaf-index features (one column per tree, values are leaf ids).\n",
    "# dtype='int8' keeps the ~7.4M-row frames small in memory (see info below).\n",
    "train = pd.read_csv(path + 'train_gbdt_leaf.csv',dtype='int8')\n",
    "test = pd.read_csv(path + 'test_gbdt_leaf.csv',dtype='int8')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 7377418 entries, 0 to 7377417\n",
      "Data columns (total 51 columns):\n",
      "class1     int8\n",
      "class2     int8\n",
      "class3     int8\n",
      "class4     int8\n",
      "class5     int8\n",
      "class6     int8\n",
      "class7     int8\n",
      "class8     int8\n",
      "class9     int8\n",
      "class10    int8\n",
      "class11    int8\n",
      "class12    int8\n",
      "class13    int8\n",
      "class14    int8\n",
      "class15    int8\n",
      "class16    int8\n",
      "class17    int8\n",
      "class18    int8\n",
      "class19    int8\n",
      "class20    int8\n",
      "class21    int8\n",
      "class22    int8\n",
      "class23    int8\n",
      "class24    int8\n",
      "class25    int8\n",
      "class26    int8\n",
      "class27    int8\n",
      "class28    int8\n",
      "class29    int8\n",
      "class30    int8\n",
      "class31    int8\n",
      "class32    int8\n",
      "class33    int8\n",
      "class34    int8\n",
      "class35    int8\n",
      "class36    int8\n",
      "class37    int8\n",
      "class38    int8\n",
      "class39    int8\n",
      "class40    int8\n",
      "class41    int8\n",
      "class42    int8\n",
      "class43    int8\n",
      "class44    int8\n",
      "class45    int8\n",
      "class46    int8\n",
      "class47    int8\n",
      "class48    int8\n",
      "class49    int8\n",
      "class50    int8\n",
      "target     int8\n",
      "dtypes: int8(51)\n",
      "memory usage: 358.8 MB\n"
     ]
    }
   ],
   "source": [
    "# Sanity check: 50 leaf columns + target, all int8 (~359 MB).\n",
    "train.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Separate the label; train now holds only the 50 leaf-index features.\n",
    "target = train.pop('target')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Original engineered feature tables (full column set, mixed dtypes).\n",
    "orig_train = pd.read_csv(path + 'train.csv')\n",
    "orig_test = pd.read_csv(path + 'test.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['artist_composer', 'artist_composer_lyricist', 'artist_lyricist',\n",
       "       'artist_name', 'bd', 'city', 'composer', 'expiration_date',\n",
       "       'expiration_month', 'expiration_year', 'gender', 'genre_ids',\n",
       "       'isrc_name', 'language', 'lyricist', 'membership_days', 'msno',\n",
       "       'registered_via', 'registration_date', 'registration_month',\n",
       "       'registration_year', 'song_id', 'song_length', 'song_year',\n",
       "       'source_screen_name', 'source_system_tab', 'source_type',\n",
       "       'genre_ids_count', 'lyricists_count', 'composer_count', 'artist_count',\n",
       "       'is_featured', 'song_lang_boolean', 'smaller_song', 'count_song_played',\n",
       "       'count_artist_played', 'count_user_played', 'target'],\n",
       "      dtype='object')"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect available columns before selecting the numeric subset below.\n",
    "orig_train.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Numeric / id columns to keep for the linear model (string categoricals\n",
    "# such as artist_name, genre_ids and source_* are dropped here).\n",
    "col_num = ['artist_composer', 'artist_composer_lyricist', 'artist_lyricist',\n",
    "       'bd', 'expiration_date',\n",
    "       'expiration_month', 'expiration_year', \n",
    "       'membership_days', 'msno',\n",
    "        'registration_date', 'registration_month',\n",
    "       'registration_year', 'song_id', 'song_length', 'song_year',\n",
    "       'genre_ids_count', 'lyricists_count', 'composer_count', 'artist_count',\n",
    "       'is_featured', 'song_lang_boolean', 'smaller_song', 'count_song_played',\n",
    "       'count_artist_played', 'count_user_played']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restrict both frames to the selected numeric/id columns.\n",
    "orig_train = orig_train[col_num]\n",
    "orig_test = orig_test[col_num]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### msno和song_id用hash编码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.feature_extraction import FeatureHasher\n",
    "\n",
    "# Hash high-cardinality id columns into a small fixed-width space.\n",
    "fh = FeatureHasher(n_features=10, input_type='string')\n",
    "\n",
    "# Stack train on top of test so the hashing/scaling steps below see one\n",
    "# frame. DataFrame.append was deprecated in pandas 1.4 and removed in\n",
    "# 2.0; pd.concat is the supported equivalent. Do NOT pass\n",
    "# ignore_index=True: the positional .iloc split later relies on each\n",
    "# half keeping its own RangeIndex for the axis=1 concat alignment.\n",
    "df = pd.concat([orig_train, orig_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): with input_type='string', FeatureHasher treats each\n",
    "# sample as an iterable of tokens, so a plain string is hashed\n",
    "# character-by-character rather than as one id token (consistent with\n",
    "# the small integer counts shown in hashcode.head() below). If whole-id\n",
    "# hashing was intended, pass [[s] for s in ...] instead -- confirm.\n",
    "msno_hash = fh.fit_transform(df['msno'].astype('str'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same caveat as msno above: each id string is hashed per character.\n",
    "song_hash = fh.fit_transform(df['song_id'].astype('str'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Densify the sparse hash matrices; float16 halves memory vs float32.\n",
    "msno_hash = pd.DataFrame(msno_hash.toarray(),dtype='float16')\n",
    "song_hash = pd.DataFrame(song_hash.toarray(),dtype='float16')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Side-by-side join: 10 msno hash columns then 10 song_id hash columns.\n",
    "hashcode = pd.concat([msno_hash,song_hash],axis=1,ignore_index=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Name the 20 hashed columns hash_1 ... hash_20 (10 msno + 10 song_id).\n",
    "hashcode.columns = [f'hash_{i}' for i in range(1, 21)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>hash_1</th>\n",
       "      <th>hash_2</th>\n",
       "      <th>hash_3</th>\n",
       "      <th>hash_4</th>\n",
       "      <th>hash_5</th>\n",
       "      <th>hash_6</th>\n",
       "      <th>hash_7</th>\n",
       "      <th>hash_8</th>\n",
       "      <th>hash_9</th>\n",
       "      <th>hash_10</th>\n",
       "      <th>hash_11</th>\n",
       "      <th>hash_12</th>\n",
       "      <th>hash_13</th>\n",
       "      <th>hash_14</th>\n",
       "      <th>hash_15</th>\n",
       "      <th>hash_16</th>\n",
       "      <th>hash_17</th>\n",
       "      <th>hash_18</th>\n",
       "      <th>hash_19</th>\n",
       "      <th>hash_20</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.0</td>\n",
       "      <td>-6.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>7.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>3.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2.0</td>\n",
       "      <td>-4.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>2.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2.0</td>\n",
       "      <td>-4.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>-5.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>2.0</td>\n",
       "      <td>-4.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>-4.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.0</td>\n",
       "      <td>-6.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>-3.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>7.0</td>\n",
       "      <td>7.0</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>7.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>-4.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>6.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   hash_1  hash_2  hash_3  hash_4  hash_5  hash_6  hash_7  hash_8  hash_9  \\\n",
       "0     0.0    -6.0     2.0    -3.0     1.0     0.0     1.0    -3.0    -1.0   \n",
       "1     2.0    -4.0     4.0     1.0     2.0    -1.0     3.0    -3.0    -2.0   \n",
       "2     2.0    -4.0     4.0     1.0     2.0    -1.0     3.0    -3.0    -2.0   \n",
       "3     2.0    -4.0     4.0     1.0     2.0    -1.0     3.0    -3.0    -2.0   \n",
       "4     0.0    -6.0     2.0    -3.0     1.0     0.0     1.0    -3.0    -1.0   \n",
       "\n",
       "   hash_10  hash_11  hash_12  hash_13  hash_14  hash_15  hash_16  hash_17  \\\n",
       "0      7.0     -1.0     -1.0      2.0      2.0      4.0     -3.0      2.0   \n",
       "1      0.0      2.0     -3.0      4.0      2.0      2.0     -2.0     -1.0   \n",
       "2      0.0      3.0     -1.0     -1.0      4.0      0.0      0.0     -2.0   \n",
       "3      0.0      0.0     -3.0      2.0      1.0     -1.0     -4.0      2.0   \n",
       "4      7.0      7.0     -2.0      7.0      2.0      0.0      0.0      1.0   \n",
       "\n",
       "   hash_18  hash_19  hash_20  \n",
       "0     -2.0      0.0      3.0  \n",
       "1     -1.0     -1.0      2.0  \n",
       "2     -5.0      2.0      2.0  \n",
       "3     -2.0      1.0      2.0  \n",
       "4     -4.0     -1.0      6.0  "
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): hashcode is built and previewed here but never joined\n",
    "# into x_train/x_test below -- confirm whether that was intentional.\n",
    "hashcode.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 处理数值特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "log化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compress heavy-tailed count/length features with log1p (safe at 0).\n",
    "col_log = ['song_length','lyricists_count','composer_count','artist_count',\n",
    "           'count_song_played','count_artist_played','count_user_played']\n",
    "df[col_log] = np.log1p(df[col_log])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "标准化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.preprocessing import StandardScaler\n",
    "# Zero-mean / unit-variance scaling for the remaining numeric columns.\n",
    "# NOTE(review): the scaler is fit on train+test combined; common in\n",
    "# competition pipelines, but it leaks test-set statistics into the\n",
    "# training features -- confirm this is acceptable here.\n",
    "col_sta = ['bd','expiration_date','expiration_year','registration_date','registration_year',\n",
    "          'genre_ids_count','membership_days','song_year']\n",
    "df[col_sta] = StandardScaler().fit_transform(df[col_sta])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 创建模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split the stacked frame back into its train/test halves by position,\n",
    "# then join column-wise with the GBDT leaf features. The axis=1 concat\n",
    "# aligns on index, which works because each half of df kept the\n",
    "# RangeIndex of the original frame it came from (see the append/concat\n",
    "# step above), matching the RangeIndex of train/test respectively.\n",
    "x_train = pd.concat([df.iloc[:len(train)],train],axis=1)\n",
    "x_test = pd.concat([df.iloc[len(train):],test],axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Drop the raw id columns: they are strings and unusable by the model\n",
    "# (their hashed versions were computed above).\n",
    "x_train.drop(['msno','song_id'],axis=1,inplace=True)\n",
    "x_test.drop(['msno','song_id'],axis=1,inplace=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Submission ids are simply the row positions of the test set.\n",
    "test_id = np.arange(0,len(test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LibLinear]"
     ]
    },
    {
     "data": {
      "text/plain": [
       "LogisticRegression(C=0.01, class_weight=None, dual=False, fit_intercept=True,\n",
       "                   intercept_scaling=1, l1_ratio=None, max_iter=100,\n",
       "                   multi_class='warn', n_jobs=-1, penalty='l2',\n",
       "                   random_state=None, solver='warn', tol=0.0001, verbose=5,\n",
       "                   warm_start=False)"
      ]
     },
     "execution_count": 62,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Strong L2 regularisation (C=0.01) for the wide feature matrix.\n",
    "# NOTE(review): the '[LibLinear]' line in the output shows the default\n",
    "# solver resolved to liblinear, for which n_jobs=-1 has no effect --\n",
    "# harmless but misleading; confirm the intended solver.\n",
    "lr = LogisticRegression(C = 0.01,verbose=5,n_jobs=-1)\n",
    "lr.fit(x_train,target)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Probability of the positive class (column 1 of predict_proba).\n",
    "pred_proba = lr.predict_proba(x_test)[:,1]\n",
    "submission = pd.DataFrame({'id':test_id,'target':pred_proba})\n",
    "# Fix: output filename was misspelled 'sumbmission.csv.gz'.\n",
    "submission.to_csv(path + 'submission.csv.gz',compression='gzip',float_format=\"%.5f\",index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
