{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [],
   "source": [
    "from datetime import datetime\n",
    "from csv import DictReader\n",
    "from math import exp, log, sqrt\n",
    "from collections import defaultdict\n",
    "import random\n",
    "import pickle"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
     "# B, model\n",
     "alpha = .05 # learning rate\n",
     "beta = 1.   # smoothing parameter for adaptive learning rate\n",
     "L1 = .4     # L1 regularization, larger value means more regularized\n",
     "L2 = .1     # L2 regularization, larger value means more regularized\n",
     "# C, feature/hash trick\n",
     "D = 2 ** 26            # number of weights to use\n",
     "interaction = True     # whether to enable poly2 feature interactions\n",
     "SUB = True             # submission mode: pushes the validation cutoff to day 30\n",
     "# D, training/validation\n",
     "epoch = 3       # learn training data for N passes\n",
     "holdafter = 30 if SUB else 28  # data after date N (exclusive) are used as validation\n",
     "holdout = None  # use every N training instance for holdout validation"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [
    {
     "name": "stdout",
     "text": [
      "C1 - site_id\n",
      "C1 - app_id\n",
      "banner_pos - site_id\n",
      "banner_pos - app_id\n",
      "banner_pos - device_id\n",
      "banner_pos - device_model\n",
      "site_id - device_model\n",
      "site_id - device_conn_type\n",
      "site_id - C14\n",
      "app_id - device_model\n",
      "app_id - device_conn_type\n",
      "app_id - C14\n",
      "ipcate - site_id\n",
      "ipcate - app_id\n",
      "C20 - site_id\n",
      "C20 - app_id\n",
      "device_id - site_id\n",
      "device_id - app_id\n",
      "device_id - C14\n",
      "ips - site_id\n",
      "ips - app_id\n",
      "ips - C17\n",
      "C22 - site_id\n",
      "C22 - app_id\n",
      "C23 - site_id\n",
      "C23 - app_id\n",
      "C24 - site_id\n",
      "C24 - app_id\n",
      "C25 - site_id\n",
      "C25 - app_id\n",
      "C26 - site_id\n",
      "C26 - app_id\n",
      "C27 - site_id\n",
      "C27 - app_id\n",
      "C28 - site_id\n",
      "C28 - app_id\n",
      "C22 - C23\n",
      "C22 - C24\n",
      "C22 - C25\n",
      "C22 - C26\n",
      "C22 - C27\n",
      "C22 - C28\n"
     ],
     "output_type": "stream"
    }
   ],
   "source": [
    "#interactions\n",
    "inter_s = \"ac,af,bc,bf,bi,bk,ck,cm,cn,fk,fm,fn,wc,wf,tc,tf,ic,if,in,jc,jf,jq,xc,xf,yc,yf,zc,zf,1c,1f,2c,2f,3c,3f,4c,4f,xy,xz,x1,x2,x3,x4\"\n",
    "\n",
    "inter = []\n",
    "featdict = {\"a\":\"C1\",\"b\":\"banner_pos\",\"c\":\"site_id\",\"d\":\"site_domain\",\"e\":\"site_category\",\"f\":\"app_id\",\"g\":\"app_domain\",\"h\":\"app_category\",\"i\":\"device_id\",\"j\":\"ips\",\"k\":\"device_model\",\"l\":\"device_type\",\"m\":\"device_conn_type\",\"n\":\"C14\",\"o\":\"C15\",\"p\":\"C16\",\"q\":\"C17\",\"r\":\"C18\",\"s\":\"C19\",\"t\":\"C20\",\"u\":\"C21\",\"w\":\"ipcate\",\"x\":\"C22\",\"y\":\"C23\",\"z\":\"C24\",\"1\":\"C25\",\"2\":\"C26\",\"3\":\"C27\",\"4\":\"C28\"}\n",
    "for i in inter_s.split(\",\"):\n",
    "    inter.append((featdict[i[0]],featdict[i[1]]))\n",
    "    print(featdict[i[0]],\"-\",featdict[i[1]])"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
    "for i in range(29,49):\n",
    "\tco = \"C\" + str(i)\n",
    "\tinter.append((co,\"site_id\"))\n",
    "\tinter.append((co,\"app_id\"))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [],
   "source": [
    "fsic = open(\"testcase.pkl\",'rb')\n",
    "ipcate = pickle.load(fsic)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [],
   "source": [
    "def convt(s,t):\n",
    "    s = s.split(\"_\")[1]\n",
    "    if int(s) <= 70:\n",
    "        return t + \"_\" + s\n",
    "    else:\n",
    "        return t + \"_\" + \"l\"\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [],
   "source": [
    "class ftrl_proximal(object):\n",
    "    ''' Our main algorithm: Follow the regularized leader - proximal\n",
    "\n",
    "        In short,\n",
    "        this is an adaptive-learning-rate sparse logistic-regression with\n",
    "        efficient L1-L2-regularization\n",
    "\n",
    "        Reference:\n",
    "        http://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf\n",
    "    '''\n",
    "\n",
    "    def __init__(self, alpha, beta, L1, L2, D, interaction):\n",
    "        # parameters\n",
    "        self.alpha = alpha\n",
    "        self.beta = beta\n",
    "        self.L1 = L1\n",
    "        self.L2 = L2\n",
    "\n",
    "        # feature related parameters\n",
    "        self.D = D\n",
    "        self.interaction = interaction\n",
    "\n",
    "        # model\n",
    "        # n: squared sum of past gradients\n",
    "        # z: weights\n",
    "        # w: lazy weights\n",
    "        self.n = [0.] * D\n",
    "        self.z = [0.] * D\n",
    "        self.w = [0.] * D\n",
    "\n",
    "    def _indices(self, x):\n",
    "        ''' A helper generator that yields the indices in x\n",
    "\n",
    "            The purpose of this generator is to make the following\n",
    "            code a bit cleaner when doing feature interaction.\n",
    "        '''\n",
    "\n",
    "        # first yield index of the bias term\n",
    "        yield 0\n",
    "\n",
    "        D = self.D\n",
    "        # then yield the normal indices\n",
    "        for feat in x:\n",
    "            index = abs(hash(feat)) % D\n",
    "            yield index\n",
    "\n",
    "    def predict(self, x):\n",
    "        ''' Get probability estimation on x\n",
    "\n",
    "            INPUT:\n",
    "                x: features\n",
    "\n",
    "            OUTPUT:\n",
    "                probability of p(y = 1 | x; w)\n",
    "        '''\n",
    "\n",
    "        # model\n",
    "        w = self.w\n",
    "\n",
    "        # wTx is the inner product of w and x\n",
    "        wTx = 0.\n",
    "        for i in self._indices(x):\n",
    "            wTx += w[i]\n",
    "\n",
    "        # bounded sigmoid function, this is the probability estimation\n",
    "        return 1. / (1. + exp(-max(min(wTx, 35.), -35.)))\n",
    "\n",
    "    def update(self, x, p, y):\n",
    "        ''' Update model using x, p, y\n",
    "\n",
    "            INPUT:\n",
    "                x: feature, a list of indices\n",
    "                p: click probability prediction of our model\n",
    "                y: answer\n",
    "\n",
    "            MODIFIES:\n",
    "                self.n: increase by squared gradient\n",
    "                self.z: weights\n",
    "        '''\n",
    "\n",
    "        # parameter\n",
    "        alpha = self.alpha\n",
    "\n",
    "        # model\n",
    "        n = self.n\n",
    "        z = self.z\n",
    "        w = self.w\n",
    "\n",
    "        # gradient under logloss\n",
    "        g = p - y\n",
    "\n",
    "        # update z and n\n",
    "        tmp = 0\n",
    "        for i in self._indices(x):\n",
    "            sigma = (sqrt(n[i] + g * g) - sqrt(n[i])) / alpha\n",
    "            z[i] += g - sigma * w[i]\n",
    "            n[i] += g * g\n",
    "            sign = -1. if z[i] < 0 else 1.  # get sign of z[i]\n",
    "            # build w using z and n\n",
    "            if sign * z[i] <= L1:\n",
    "                # w[i] vanishes due to L1 regularization\n",
    "                w[i] = 0.\n",
    "            else:\n",
    "                # apply prediction time L1, L2 regularization to z and get w\n",
    "                w[i] = (sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "outputs": [],
   "source": [
    "def logloss(p, y):\n",
    "    ''' FUNCTION: Bounded logloss\n",
    "\n",
    "        INPUT:\n",
    "            p: our prediction\n",
    "            y: real answer\n",
    "\n",
    "        OUTPUT:\n",
    "            logarithmic loss of p given y\n",
    "    '''\n",
    "\n",
    "    p = max(min(p, 1. - 10e-15), 10e-15)\n",
    "    return -log(p) if y == 1. else -log(1. - p)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "outputs": [
    {
     "data": {
      "text/plain": "'ssdddf12'"
     },
     "metadata": {},
     "output_type": "execute_result",
     "execution_count": 9
    }
   ],
   "source": [
     "# scratch cell: sanity-checks the negative slice used in data() to strip\n",
     "# the trailing 'ips' marker from device_ip values\n",
     "'ssdddf12345'[:-3]"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "outputs": [],
   "source": [
    "def data(path, D):\n",
    "    ''' GENERATOR: Apply hash-trick to the original csv row\n",
    "                   and for simplicity, we one-hot-encode everything\n",
    "\n",
    "        INPUT:\n",
    "            path: path to training or testing file\n",
    "            D: the max index that we can hash to\n",
    "\n",
    "        YIELDS:\n",
    "            ID: id of the instance, mainly useless\n",
    "            x: a list of hashed and one-hot-encoded 'indices'\n",
    "               we only need the index since all values are either 0 or 1\n",
    "            y: y = 1 if we have a click, else we have y = 0\n",
    "    '''\n",
    "\n",
    "    for t, row in enumerate(DictReader(open(path))):\n",
    "        # process id\n",
    "        ID = row['id']\n",
    "        del row['id']\n",
    "\n",
    "        # process clicks\n",
    "        y = 0.\n",
    "        if 'click' in row:\n",
    "            if row['click'] == '1':\n",
    "                y = 1.\n",
    "            del row['click']\n",
    "        \n",
    "        # turn hour really into hour, it was originally YYMMDDHH\n",
    "\n",
    "        date = int(row[\"hour\"][4:6])\n",
    "        row[\"C28\"] = convt(row[\"C28\"],\"C28\")\n",
    "        del row[\"hour\"]\n",
    "        row[\"ips\"] = row[\"device_ip\"]\n",
    "        if row[\"device_ip\"][-3:] == \"ips\":\n",
    "            row[\"device_ip\"] = \"ips\"\n",
    "            row[\"ips\"] = row[\"ips\"][:-3]\n",
    "        row[\"ipcate\"] = \"ipcate_null\"\n",
    "        if row[\"ips\"][10:] in ipcate:\n",
    "            row[\"ipcate\"] = \"ipcate_\" + str(ipcate[row[\"ips\"][10:]])\n",
    "        # build x\n",
    "        x = []\n",
    "        if interaction:\n",
    "            for pair in inter:\n",
    "                x.append(row[pair[0]] + \"_\" + row[pair[1]])\n",
    "        del row[\"ips\"]\n",
    "        for key in row:\n",
    "            value = row[key]\n",
    "            # one-hot encode everything with hash trick\n",
    "            x.append(value)\n",
    "        yield t, date, ID, x, y"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "outputs": [],
   "source": [
     "# debug snapshot: copy the first 30 training rows for quick inspection\n",
     "import pandas as pd\n",
     "train_df = pd.read_csv('train_over.csv',nrows=30)\n",
     "train_df.to_csv(\"train_bad.csv\",index=False)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "outputs": [
    {
     "name": "stdout",
     "text": [
      "train: 500000\n",
      "train: 1000000\n",
      "train: 1500000\n",
      "train: 2000000\n",
      "train: 2500000\n",
      "train: 3000000\n",
      "train: 3500000\n",
      "train: 4000000\n",
      "train: 4500000\n",
      "train: 5000000\n",
      "train: 5500000\n",
      "train: 6000000\n",
      "train: 6500000\n",
      "train: 7000000\n",
      "train: 7500000\n",
      "train: 8000000\n",
      "train: 8500000\n",
      "train: 9000000\n",
      "train: 9500000\n",
      "train: 10000000\n",
      "Epoch 0 finished, validation logloss: 0.000000, elapsed time: 0:47:55.941357\n",
      "0.05\n",
      "train: 500000\n",
      "train: 1000000\n",
      "train: 1500000\n",
      "train: 2000000\n",
      "train: 2500000\n",
      "train: 3000000\n",
      "train: 3500000\n",
      "train: 4000000\n",
      "train: 4500000\n",
      "train: 5000000\n",
      "train: 5500000\n",
      "train: 6000000\n",
      "train: 6500000\n",
      "train: 7000000\n",
      "train: 7500000\n",
      "train: 8000000\n",
      "train: 8500000\n",
      "train: 9000000\n",
      "train: 9500000\n",
      "train: 10000000\n",
      "Epoch 1 finished, validation logloss: 0.000000, elapsed time: 1:34:49.436363\n",
      "0.04\n",
      "train: 500000\n",
      "train: 1000000\n",
      "train: 1500000\n",
      "train: 2000000\n",
      "train: 2500000\n",
      "train: 3000000\n",
      "train: 3500000\n",
      "train: 4000000\n",
      "train: 4500000\n",
      "train: 5000000\n",
      "train: 5500000\n",
      "train: 6000000\n",
      "train: 6500000\n",
      "train: 7000000\n",
      "train: 7500000\n",
      "train: 8000000\n",
      "train: 8500000\n",
      "train: 9000000\n",
      "train: 9500000\n",
      "train: 10000000\n",
      "Epoch 2 finished, validation logloss: 0.000000, elapsed time: 2:22:24.811388\n",
      "0.030000000000000002\n"
     ],
     "output_type": "stream"
    }
   ],
   "source": [
    "start = datetime.now()\n",
    "train = 'train_over.csv'\n",
    "# initialize ourselves a learner\n",
    "learner = ftrl_proximal(alpha, beta, L1, L2, D, interaction)\n",
    "# start training\n",
    "for e in range(epoch):\n",
    "    loss = 0.\n",
    "    count = 1\n",
    "    localcount = 0\n",
    "    learner.alpha = 0.05 - 0.01 * e\n",
    "    for t, date, ID, x, y in data(train, D):  # data is a generator\n",
    "        #    t: just a instance counter\n",
    "        # date: you know what this is\n",
    "        #   ID: id provided in original data\n",
    "        #    x: features\n",
    "        #    y: label (click)\n",
    "\n",
    "        # step 1, get prediction from learner\n",
    "        p = learner.predict(x)\n",
    "        #print progress\n",
    "        localcount += 1\n",
    "        if localcount % 500000 == 0:\n",
    "            if (holdafter and date > holdafter) or (holdout and t % holdout == 0):\n",
    "                print(\"valid: \" + str(localcount))\n",
    "            else:\n",
    "                print(\"train: \" + str(localcount))\n",
    "\n",
    "        if (holdafter and date > holdafter) or (holdout and t % holdout == 0):\n",
    "            # step 2-1, calculate validation loss\n",
    "            #           we do not train with the validation data so that our\n",
    "            #           validation loss is an accurate estimation\n",
    "            #\n",
    "            # holdafter: train instances from day 1 to day N\n",
    "            #            validate with instances from day N + 1 and after\n",
    "            #\n",
    "            # holdout: validate with every N instance, train with others\n",
    "            loss += logloss(p, y)\n",
    "\n",
    "            count += 1\n",
    "        else:\n",
    "            # step 2-2, update learner with label (click) information\n",
    "            learner.update(x, p, y)\n",
    "    print('Epoch %d finished, validation logloss: %f, elapsed time: %s' % (\n",
    "        e, loss/count, str(datetime.now() - start)))\n",
    "    print(learner.alpha)\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "outputs": [],
   "source": [
     "# paths for inference input and raw prediction output\n",
     "test = 'test_over.csv'\n",
     "submission= 'sub.csv'"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "outputs": [],
   "source": [
     "def sub():\n",
     "    \"\"\"Write one predicted click probability per line to `submission`\n",
     "    (no header, no id column); ids are joined back in later cells from\n",
     "    test_sub_count.csv.\"\"\"\n",
     "    with open(submission, 'w') as outfile:\n",
     "        for t, date, ID, x, y in data(test, D):\n",
     "            p = learner.predict(x)\n",
     "            outfile.write('%f\\n' % p)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "outputs": [],
   "source": [
     "# score the test set and write sub.csv\n",
     "sub()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "outputs": [],
   "source": [
    "sub_df = pd.read_csv(\"sub.csv\",header=None)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "outputs": [],
   "source": [
    "test_df = pd.read_csv(\"test_sub_count.csv\",dtype=object)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "outputs": [],
   "source": [
     "# NOTE(review): assumes test_sub_count.csv row order matches sub.csv -\n",
     "# verify before submitting\n",
     "test_id = test_df.id"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "outputs": [],
   "source": [
     "# name the single prediction column as the submission format expects\n",
     "sub_df.columns = ['click']"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "outputs": [],
   "source": [
     "# prepend the id column to build the final submission frame\n",
     "sub_df.insert(0,'id',test_id)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "outputs": [],
   "source": [
     "# write the final id,click submission file\n",
     "sub_df.to_csv('sub_over.csv',index=False)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "python3",
   "language": "python",
   "display_name": "Python 3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  },
  "pycharm": {
   "stem_cell": {
    "cell_type": "raw",
    "source": [],
    "metadata": {
     "collapsed": false
    }
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}