{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "((4000000, 2), (1000000, 1)) ((12460, 2), (4999341, 3), (5000000, 13), (5000000, 9), (840560515, 5))\n",
      "(5000000, 2)\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import seaborn as sns\n",
    "import matplotlib.pyplot as plt\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n",
    "from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer,HashingVectorizer\n",
    "from sklearn.decomposition import TruncatedSVD,SparsePCA\n",
    "from sklearn.model_selection import KFold,StratifiedKFold\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import accuracy_score,roc_auc_score,f1_score,recall_score\n",
    "\n",
    "import gc\n",
    "import time\n",
    "import os\n",
    "import sys\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "pickle_path = \"../pickle\"\n",
    "\n",
    "# The raw CSVs ship without a header row, so column names are set explicitly.\n",
    "train = pd.read_csv(\"../data/age_train.csv\",names=['uid','age_group']).sort_values(by=['uid'])\n",
    "test = pd.read_csv(\"../data/age_test.csv\",names=['uid']).sort_values(by=['uid'])\n",
    "info = pd.read_csv(\"../data/app_info.csv\",names=['appid','category'])\n",
    "active = pd.read_pickle(\"{}/user_app_active.pickle\".format(pickle_path))\n",
    "usage = pd.read_pickle(\"{}/user_app_usage.pickle\".format(pickle_path))\n",
    "user_basic_info = pd.read_csv(\"../data/user_basic_info.csv\",names=['uid','gender','city','prodname','ramcapacity','ramleftration','romcapacity','romleftration','color','fontsize','ct','carrier','os']).sort_values(by=['uid'])\n",
    "behavior_info = pd.read_csv(\"../data/user_behavior_info.csv\",names=['uid','boottimes','a','b','c','d','e','f','g']).sort_values(by=['uid'])\n",
    "print((train.shape,test.shape),(info.shape,active.shape,user_basic_info.shape,behavior_info.shape,usage.shape))\n",
    "\n",
    "# DataFrame.append was deprecated and removed in pandas 2.0; pd.concat with\n",
    "# sort=False reproduces its behaviour (test rows get NaN age_group).\n",
    "all_data = pd.concat([train, test], sort=False)\n",
    "all_data = all_data.sort_values(by=['uid']).reset_index(drop=True)\n",
    "print(all_data.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from gensim import corpora, models, similarities\n",
    "from gensim.models.doc2vec import TaggedDocument\n",
    "from glove import *\n",
    "\n",
    "def get_w2c_feature(df,load_model,model,prefix):\n",
    "    w2c_arr = []\n",
    "    vocab = load_model.vocab.keys()\n",
    "    \n",
    "    for v in vocab :\n",
    "        w2c_arr.append(list(load_model.wv[v]))\n",
    "\n",
    "    # w2v Stat\n",
    "    df_w2c = pd.DataFrame()\n",
    "    df_w2c['word_id'] = vocab\n",
    "    df_w2c = pd.concat([df_w2c, pd.DataFrame(w2c_arr)], axis=1)\n",
    "    df_w2c.columns = ['appid'] + ['appid_{}'.format(model) + '_embedding_' + str(i) for i in range(size)]\n",
    "    df_w2c_feat = df[['uid', 'appid']].merge(df_w2c, on='appid', how='left')\n",
    "\n",
    "    agg = {}\n",
    "    for l in ['appid_{}'.format(model) + '_embedding_' + str(i) for i in range(size)] :\n",
    "        agg[l] = ['mean', 'std', 'max', 'min']\n",
    "\n",
    "    df_agg = df_w2c_feat.groupby('uid').agg(agg)\n",
    "    df_agg.columns = pd.Index(['{}_uid_'.format(model) + prefix  + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n",
    "    df_agg = df_agg.reset_index().sort_values(by=['uid'],ascending=True)\n",
    "    return df_agg\n",
    "\n",
    "def get_gensim_feature(now=None,model='word2vec',size=5,window=10,prefix='active'):\n",
    "    \"\"\"Build per-user embedding features from each user's app-id sequence.\n",
    "\n",
    "    now    : DataFrame with 'uid' and 'appid' (sequence of app ids per user).\n",
    "    model  : 'word2vec' | 'lda' | 'fasttext' | 'doc2vec' | 'lsi' | 'glove'.\n",
    "    size   : embedding / topic dimensionality.\n",
    "    window : context window for the window-based models.\n",
    "    prefix : source tag ('active'/'usage') used in column names and cache file.\n",
    "\n",
    "    The result is cached at ../pickle/{prefix}_{model}_emb.pickle; delete that\n",
    "    file to force a recompute (changing `size` alone will NOT invalidate it).\n",
    "    \"\"\"\n",
    "    df = now.copy()\n",
    "    if os.path.exists(\"../pickle/{}_{}_emb.pickle\".format(prefix,model)):\n",
    "        return pd.read_pickle(\"../pickle/{}_{}_emb.pickle\".format(prefix,model))\n",
    "    else:\n",
    "        dictionary = corpora.Dictionary(df['appid'].values)\n",
    "        corpus = [dictionary.doc2bow(text) for text in df['appid'].values]\n",
    "        if model=='word2vec':\n",
    "            if os.path.exists(\"../vector/w2v.model\"):\n",
    "                w2v = models.KeyedVectors.load_word2vec_format(\"../vector/w2v.model\", binary=False)\n",
    "            else:\n",
    "                w2v = models.Word2Vec(df['appid'].values, size=size, window=window, workers=40)\n",
    "                w2v.wv.save_word2vec_format(\"../vector/w2v.model\")\n",
    "            # set(): O(1) membership tests in the per-word loop below.\n",
    "            vocab = set(w2v.wv.vocab.keys())\n",
    "\n",
    "            # Sentence embedding: average of the word vectors in each app list.\n",
    "            w2v_feature = np.zeros((df.shape[0],size))\n",
    "            w2v_feature_avg = np.zeros((df.shape[0],size))\n",
    "\n",
    "            for i,line in tqdm(enumerate(df['appid'].values.tolist())):\n",
    "                num = 0\n",
    "                if line == '':\n",
    "                    w2v_feature_avg[i,:] = np.zeros(size)\n",
    "                else:\n",
    "                    for word in line:\n",
    "                        num += 1\n",
    "                        vec = w2v[word] if word in vocab else np.zeros(size)\n",
    "                        w2v_feature[i,:] += vec\n",
    "                    w2v_feature_avg[i,:] = w2v_feature[i,:] / num\n",
    "            w2v_avg = pd.DataFrame(w2v_feature_avg)\n",
    "            w2v_avg = w2v_avg.add_prefix(\"W2V_AVG_{}_\".format(prefix))\n",
    "            w2v_avg['uid'] = df['uid']\n",
    "            df_agg = w2v_avg\n",
    "\n",
    "        elif model=='lda':\n",
    "            lda = models.LdaModel(corpus=corpus, id2word=dictionary, num_topics=size)\n",
    "            # BUGFIX: the topic matrix was hard-coded to 20 columns and only\n",
    "            # worked because callers happened to pass size=20.\n",
    "            col = np.zeros((df.shape[0],size))\n",
    "            ans = lda.get_document_topics(corpus)\n",
    "            for i in tqdm(range(df.shape[0])):\n",
    "                for j in ans[i]:\n",
    "                    col[i][j[0]] = j[1]\n",
    "\n",
    "            df_agg = pd.DataFrame(col)\n",
    "            df_agg = df_agg.add_prefix(\"LDA_TOPIC_{}_\".format(prefix))\n",
    "            df_agg['uid'] = df['uid']\n",
    "\n",
    "        elif model=='fasttext':\n",
    "            if os.path.exists(\"../vector/fasttext.model\"):\n",
    "                fasttext = models.KeyedVectors.load_word2vec_format(\"../vector/fasttext.model\", binary=False)\n",
    "            else:\n",
    "                fasttext = models.FastText(df['appid'].values, size=size, window=window, workers=40)\n",
    "                fasttext.wv.save_word2vec_format(\"../vector/fasttext.model\")\n",
    "            vocab = set(fasttext.wv.vocab.keys())\n",
    "\n",
    "            fasttext_feature = np.zeros((df.shape[0],size))\n",
    "            fasttext_feature_avg = np.zeros((df.shape[0],size))\n",
    "\n",
    "            for i,line in tqdm(enumerate(df['appid'].values.tolist())):\n",
    "                num = 0\n",
    "                if line == '':\n",
    "                    fasttext_feature_avg[i,:] = np.zeros(size)\n",
    "                else:\n",
    "                    for word in line:\n",
    "                        num += 1\n",
    "                        vec = fasttext[word] if word in vocab else np.zeros(size)\n",
    "                        fasttext_feature[i,:] += vec\n",
    "                    fasttext_feature_avg[i,:] = fasttext_feature[i,:] / num\n",
    "            fasttext_avg = pd.DataFrame(fasttext_feature_avg)\n",
    "            fasttext_avg = fasttext_avg.add_prefix(\"FASTTEXT_AVG_{}\".format(prefix))\n",
    "            fasttext_avg['uid'] = df['uid']\n",
    "            df_agg = fasttext_avg\n",
    "\n",
    "        elif model=='doc2vec':\n",
    "            if os.path.exists(\"../vector/d2v.model\"):\n",
    "                d2v = models.KeyedVectors.load_word2vec_format(\"../vector/d2v.model\", binary=False)\n",
    "            else:\n",
    "                docs = [TaggedDocument(words=i[1],tags=[str(i[0])]) for i in df[['uid','appid']].values]\n",
    "                d2v = models.Doc2Vec(docs,size=size,window=window,workers=40)\n",
    "                d2v.wv.save_word2vec_format(\"../vector/d2v.model\")\n",
    "            vocab = set(d2v.wv.vocab.keys())\n",
    "\n",
    "            d2v_avg = []\n",
    "            for i in tqdm(df['appid'].values):\n",
    "                line = []\n",
    "                for j in i:\n",
    "                    # BUGFIX: OOV words used to append the scalar 0, which\n",
    "                    # corrupts np.mean(..., axis=0) over a list of vectors.\n",
    "                    line.append(d2v[j] if j in vocab else np.zeros(size))\n",
    "                d2v_avg.append(np.mean(line,axis=0))\n",
    "            d2v_avg = pd.DataFrame(d2v_avg)\n",
    "            d2v_avg = d2v_avg.add_prefix(\"d2v_AVG_{}\".format(prefix))\n",
    "            d2v_avg['uid'] = df['uid']\n",
    "            df_agg = d2v_avg\n",
    "\n",
    "        elif model=='lsi':\n",
    "            lsi = models.LsiModel(corpus=corpus, id2word=dictionary, num_topics=size)\n",
    "            df_agg = []\n",
    "            for i in tqdm(df['appid'].values):\n",
    "                lsi_ = lsi[dictionary.doc2bow(i)]\n",
    "                # BUGFIX: lsi_ is already the topic-space projection; the\n",
    "                # original applied the model a second time (lsi[lsi_]).\n",
    "                df_agg.append([tmp[1] for tmp in lsi_])\n",
    "\n",
    "            df_agg = pd.DataFrame(df_agg)\n",
    "            df_agg = df_agg.add_prefix(\"LSI_TOPIC_{}_\".format(prefix))\n",
    "            df_agg['uid'] = df['uid']\n",
    "\n",
    "        elif model=='glove':\n",
    "            matrix =  Corpus()\n",
    "            matrix.fit(df['appid'].values)\n",
    "            glove = Glove(no_components=size, learning_rate=0.05)\n",
    "            glove.fit(matrix.matrix,epochs=10,no_threads=30,verbose=1)\n",
    "            glove.add_dictionary(matrix.dictionary)\n",
    "            ans = []\n",
    "            for i in tqdm(df['appid'].values):\n",
    "                line = []\n",
    "                for j in i:\n",
    "                    line.append(glove.word_vectors[glove.dictionary[j]])\n",
    "                ans.append(np.mean(line,axis=0))\n",
    "            df_agg = pd.DataFrame(ans)\n",
    "            df_agg = df_agg.add_prefix(\"Glove_AVG_{}\".format(prefix))\n",
    "            df_agg['uid'] = df['uid']\n",
    "\n",
    "        df_agg.to_pickle(\"../pickle/{}_{}_emb.pickle\".format(prefix,model))\n",
    "\n",
    "    return df_agg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 99%|█████████▉| 4942930/4999341 [50:48<00:25, 2244.35it/s] "
     ]
    }
   ],
   "source": [
    "from tqdm import tqdm\n",
    "\n",
    "# Embedding feature families over the 'active' app lists (each call is\n",
    "# cached under ../pickle, so re-running the cell is cheap once computed).\n",
    "lsi = get_gensim_feature(now=active, model='lsi', size=32, window=10, prefix='active')\n",
    "w2v = get_gensim_feature(now=active, model='word2vec', size=64, window=10, prefix='active')\n",
    "fasttext = get_gensim_feature(now=active, model='fasttext', size=64, window=10, prefix='active')\n",
    "d2v = get_gensim_feature(now=active, model='doc2vec', size=64, window=10, prefix='active')\n",
    "lda = get_gensim_feature(now=active, model='lda', size=20, window=10, prefix='active')\n",
    "\n",
    "# The same families over the 'usage' table.\n",
    "lsi_1 = get_gensim_feature(now=usage, model='lsi', size=32, window=10, prefix='usage')\n",
    "w2v_1 = get_gensim_feature(now=usage, model='word2vec', size=64, window=10, prefix='usage')\n",
    "fasttext_1 = get_gensim_feature(now=usage, model='fasttext', size=64, window=10, prefix='usage')\n",
    "d2v_1 = get_gensim_feature(now=usage, model='doc2vec', size=64, window=10, prefix='usage')\n",
    "lda_1 = get_gensim_feature(now=usage, model='lda', size=20, window=10, prefix='usage')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
