{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((173596669, 2), (840560515, 5))"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import seaborn as sns\n",
    "import matplotlib.pyplot as plt\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n",
    "from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer,HashingVectorizer\n",
    "from sklearn.decomposition import TruncatedSVD,SparsePCA\n",
    "from sklearn.model_selection import KFold,StratifiedKFold\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import accuracy_score,roc_auc_score,f1_score,recall_score\n",
    "\n",
    "import gc\n",
    "import time\n",
    "import os\n",
    "import sys\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
     "pickle_path = \"../pickle\"\n",
     "# Pickles produced by an earlier preprocessing step; both frames are expected\n",
     "# to carry 'uid' and 'appid' columns (consumed by get_graph_embedding below).\n",
     "active = pd.read_pickle(\"{}/user_app_active_flatten.pickle\".format(pickle_path))\n",
     "usage = pd.read_pickle(\"{}/user_app_usage.pickle\".format(pickle_path))\n",
     "# Bare last expression -> rich display of both shapes.\n",
     "active.shape,usage.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Graph Feature\n",
    "import scipy.sparse\n",
    "from scipy import linalg\n",
    "from scipy.special import iv\n",
    "import scipy.sparse as sp\n",
    "\n",
    "from sklearn import preprocessing\n",
    "from sklearn.utils.extmath import randomized_svd\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "\n",
    "import argparse\n",
    "import time\n",
    "\n",
    "class ProNE():\n",
    "    def __init__(self, G, emb_size=128, step=10, theta=0.5, mu=0.2, n_iter=5, random_state=2019):\n",
    "        self.G = G\n",
    "        self.emb_size = emb_size\n",
    "        self.G = self.G.to_undirected()\n",
    "        self.node_number = self.G.number_of_nodes()\n",
    "        self.random_state = random_state\n",
    "        self.step = step\n",
    "        self.theta = theta\n",
    "        self.mu = mu\n",
    "        self.n_iter = n_iter\n",
    "        \n",
    "        mat = scipy.sparse.lil_matrix((self.node_number, self.node_number))\n",
    "\n",
    "        for e in tqdm(self.G.edges()):\n",
    "            if e[0] != e[1]:\n",
    "                mat[int(e[0]), int(e[1])] = 1\n",
    "                mat[int(e[1]), int(e[0])] = 1\n",
    "        self.mat = scipy.sparse.csr_matrix(mat)\n",
    "        print(mat.shape)\n",
    "\n",
    "    def get_embedding_rand(self, matrix):\n",
    "        # Sparse randomized tSVD for fast embedding\n",
    "        t1 = time.time()\n",
    "        l = matrix.shape[0]\n",
    "        smat = scipy.sparse.csc_matrix(matrix)  # convert to sparse CSC format\n",
    "        print('svd sparse', smat.data.shape[0] * 1.0 / l ** 2)\n",
    "        U, Sigma, VT = randomized_svd(smat, n_components=self.emb_size, n_iter=self.n_iter, random_state=self.random_state)\n",
    "        U = U * np.sqrt(Sigma)\n",
    "        U = preprocessing.normalize(U, \"l2\")\n",
    "        print('sparsesvd time', time.time() - t1)\n",
    "        return U\n",
    "\n",
    "    def get_embedding_dense(self, matrix, emb_size):\n",
    "        # get dense embedding via SVD\n",
    "        t1 = time.time()\n",
    "        U, s, Vh = linalg.svd(matrix, full_matrices=False, check_finite=False, overwrite_a=True)\n",
    "        U = np.array(U)\n",
    "        U = U[:, :emb_size]\n",
    "        s = s[:emb_size]\n",
    "        s = np.sqrt(s)\n",
    "        U = U * s\n",
    "        U = preprocessing.normalize(U, \"l2\")\n",
    "        print('densesvd time', time.time() - t1)\n",
    "        return U\n",
    "\n",
    "    def fit(self, tran, mask):\n",
    "        # Network Embedding as Sparse Matrix Factorization\n",
    "        t1 = time.time()\n",
    "        l1 = 0.75\n",
    "        C1 = preprocessing.normalize(tran, \"l1\")\n",
    "        neg = np.array(C1.sum(axis=0))[0] ** l1\n",
    "\n",
    "        neg = neg / neg.sum()\n",
    "\n",
    "        neg = scipy.sparse.diags(neg, format=\"csr\")\n",
    "        neg = mask.dot(neg)\n",
    "        print(\"neg\", time.time() - t1)\n",
    "\n",
    "        C1.data[C1.data <= 0] = 1\n",
    "        neg.data[neg.data <= 0] = 1\n",
    "\n",
    "        C1.data = np.log(C1.data)\n",
    "        neg.data = np.log(neg.data)\n",
    "\n",
    "        C1 -= neg\n",
    "        F = C1\n",
    "        features_matrix = self.get_embedding_rand(F)\n",
    "        return features_matrix\n",
    "\n",
    "    def chebyshev_gaussian(self, A, a, order=10, mu=0.5, s=0.5):\n",
    "        # NE Enhancement via Spectral Propagation\n",
    "        print('Chebyshev Series -----------------')\n",
    "        t1 = time.time()\n",
    "\n",
    "        if order == 1:\n",
    "            return a\n",
    "\n",
    "        A = sp.eye(self.node_number) + A\n",
    "        DA = preprocessing.normalize(A, norm='l1')\n",
    "        L = sp.eye(self.node_number) - DA\n",
    "\n",
    "        M = L - mu * sp.eye(self.node_number)\n",
    "\n",
    "        Lx0 = a\n",
    "        Lx1 = M.dot(a)\n",
    "        Lx1 = 0.5 * M.dot(Lx1) - a\n",
    "\n",
    "        conv = iv(0, s) * Lx0\n",
    "        conv -= 2 * iv(1, s) * Lx1\n",
    "        for i in range(2, order):\n",
    "            Lx2 = M.dot(Lx1)\n",
    "            Lx2 = (M.dot(Lx2) - 2 * Lx1) - Lx0\n",
    "            #         Lx2 = 2*L.dot(Lx1) - Lx0\n",
    "            if i % 2 == 0:\n",
    "                conv += 2 * iv(i, s) * Lx2\n",
    "            else:\n",
    "                conv -= 2 * iv(i, s) * Lx2\n",
    "            Lx0 = Lx1\n",
    "            Lx1 = Lx2\n",
    "            del Lx2\n",
    "            print('Bessell time', i, time.time() - t1)\n",
    "        mm = A.dot(a - conv)\n",
    "        self.embeddings = self.get_embedding_dense(mm, self.emb_size)\n",
    "        return self.embeddings\n",
    "    \n",
    "    def transform(self):\n",
    "        if self.embeddings is None:\n",
    "            print(\"Embedding is not train\")\n",
    "            return {}\n",
    "        self.embeddings = pd.DataFrame(self.embeddings)\n",
    "        self.embeddings.columns = ['ProNE_Emb_{}'.format(i) for i in range(len(self.embeddings.columns))]\n",
    "        self.embeddings = self.embeddings.reset_index().rename(columns={'index' : 'nodes'}).sort_values(by=['nodes'],ascending=True).reset_index(drop=True)\n",
    "\n",
    "        return self.embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm import tqdm\n",
    "import networkx as nx\n",
    "import igraph as ig\n",
    "\n",
    "def get_graph_embedding(df,prefix):\n",
    "    \n",
    "    uid_lbl,appid_lbl = LabelEncoder(),LabelEncoder()\n",
    "    df['new_uid'] = uid_lbl.fit_transform(df['uid'])\n",
    "    df['new_appid'] = appid_lbl.fit_transform(df['appid'])\n",
    "    df['new_appid'] += df['new_uid'].max() + 1\n",
    "    \n",
    "    print(\"Encoder Finished...\")\n",
    "    \n",
    "    G = ig.Graph()\n",
    "    G.add_vertices(df['new_appid'].max()+1)\n",
    "    G.add_edges(df[['new_uid','new_appid']].values)\n",
    "    print(\"Build Graph Finished...\")\n",
    "    evcent = G.evcent() # 计算图中节点的向量中心性\n",
    "    shell_index = G.shell_index() # 计算图中节点度至少为K的最大子图\n",
    "    degree = G.degree() # 总度数\n",
    "    pagerank = G.pagerank() # pagerank\n",
    "    # 以下4个计算的很慢，效果还不错，可以注释掉，观察evcent的效果\n",
    "    closeness = G.closeness() # 计算节点与网络中其他所有节点的距离的平均值   \n",
    "    betweenness = G.betweenness() # 计算节点的介值\n",
    "    constraint = G.constraint()\n",
    "    eccentricity = G.eccentricity() # 计算给定节点到图中其他节点的最短距离的最大值。\n",
    "    \n",
    "    G_stat = pd.DataFrame()\n",
    "    G_stat['evcent'] = evcent\n",
    "    G_stat['shell_index'] = shell_index\n",
    "    G_stat['degree'] = degree\n",
    "    G_stat['pagerank'] = pagerank\n",
    "    print(\"PR Finished...\")\n",
    "    G_stat['closeness'] = closeness\n",
    "    G_stat['betweenness'] = betweenness\n",
    "    G_stat['constraint'] = constraint\n",
    "    G_stat['eccentricity'] = eccentricity\n",
    "    G_stat = G_stat.reset_index()\n",
    "    G_stat = G_stat[G_stat['index'].isin(df['new_uid'])]\n",
    "    G_stat['index'] = uid_lbl.inverse_transform(G_stat['index'])\n",
    "    \n",
    "    print(\"Graph Stat Finished...\")\n",
    "    G_stat.to_pickle(\"../pickle/Graph_Stat_{}.pickle\".format(prefix))\n",
    "    \n",
    "    del G\n",
    "    \n",
    "    import gc\n",
    "    gc.collect()\n",
    "    \n",
    "    G = nx.Graph()\n",
    "    G.add_edges_from(df[['new_uid','new_appid']].values)\n",
    "    model = ProNE(G,emb_size=32,n_iter=6,step=12)\n",
    "    features_matrix = model.fit(model.mat, model.mat)\n",
    "    model.chebyshev_gaussian(model.mat, features_matrix, model.step, model.mu, model.theta)\n",
    "    emb = model.transform()\n",
    "    fea = emb[emb['nodes'].isin(df['new_uid'])]\n",
    "    fea['nodes'] = uid_lbl.inverse_transform(fea['nodes'])\n",
    "    fea.rename(columns={'nodes' : 'uid'},inplace=True)\n",
    "    del G\n",
    "    gc.collect()\n",
    "    print(\"Embedding Finished...\")\n",
    "    fea.to_pickle(\"../pickle/Graph_Bi_{}.pickle\".format(prefix))\n",
    "    \n",
    "    return fea,G_stat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Encoder Finished...\n",
      "PR Finished...\n",
      "Graph Stat Finished...\n"
     ]
    }
   ],
   "source": [
    "fea1,stat1 = get_graph_embedding(active,'active').set_index('uid').add_prefix(\"active_\").reset_index()\n",
    "fea0,stat0 = get_graph_embedding(usage,'usage').set_index('uid').add_prefix(\"usage_\").reset_index()\n",
    "\n",
    "# fea0.to_pickle(\"../pickle/usage_bi_graph_ProNE.pickle\")\n",
    "# fea1.to_pickle(\"../pickle/active_bi_graph_ProNE.pickle\")\n",
    "# stat0.to_pickle(\"../pickle/usage_graph_stat.pickle\")\n",
    "# stat1.to_pickle(\"../pickle/active_graph_stat.pickle\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
