{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/hzy/anaconda3/anaconda/lib/python3.6/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n",
      "  \"This module will be removed in 0.20.\", DeprecationWarning)\n"
     ]
    }
   ],
   "source": [
    "from __future__ import print_function\n",
    "import xgboost\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "import functools\n",
    "from sklearn.metrics import r2_score\n",
    "from sklearn.linear_model import LinearRegression, Ridge, Lasso\n",
    "from sklearn.svm import SVR\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from src.dataset import Dataset, PytorchDataset\n",
    "from src import utils\n",
    "import torch\n",
    "import torch.nn\n",
    "from torch import nn\n",
    "from torch.autograd import Variable\n",
    "from src.utils import kfold_validation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Load the Shanghai-mall dataset; Xab appears to stack the domain-A\n",
    "# features (cols 0-5) with the domain-B features (cols 6+) -- later cells\n",
    "# slice it that way.  Then draw one random train / val / train+val / test split.\n",
    "d = Dataset('./data/sourceAB_shanghai_mall.csv')\n",
    "\n",
    "X_train, X_val, X_trainval, X_test, y_train, y_val, y_trainval, y_test = \\\n",
    "    utils.make_split(d.Xab, d.y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 单个域 (single domain)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 128,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "A 0.505335941112\n",
      "B 0.461520910165\n"
     ]
    }
   ],
   "source": [
    "# Repeated 5-fold CV of plain linear regression on each domain alone:\n",
    "# columns [:6] are the domain-A features, columns [6:] the domain-B features.\n",
    "lin_model = LinearRegression()\n",
    "n = 100\n",
    "scores_a = [kfold_validation(d.Xab[:, :6], d.y, 5, model=lin_model) for _ in range(n)]\n",
    "scores_b = [kfold_validation(d.Xab[:, 6:], d.y, 5, model=lin_model) for _ in range(n)]\n",
    "print('A', sum(scores_a) / n)\n",
    "print('B', sum(scores_b) / n)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 129,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "A 0.508660001088\n",
      "B 0.46279871205\n"
     ]
    }
   ],
   "source": [
    "# Per-domain repeated 5-fold CV with L2-regularised linear regression.\n",
    "ridge_model = Ridge(alpha=20)\n",
    "n = 100\n",
    "scores_a = [kfold_validation(d.Xab[:, :6], d.y, 5, model=ridge_model) for _ in range(n)]\n",
    "scores_b = [kfold_validation(d.Xab[:, 6:], d.y, 5, model=ridge_model) for _ in range(n)]\n",
    "print('A', sum(scores_a) / n)\n",
    "print('B', sum(scores_b) / n)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "A 0.481630487109\n",
      "B 0.604708412261\n"
     ]
    }
   ],
   "source": [
    "# Per-domain repeated 5-fold CV with a support-vector regressor.\n",
    "svr_model = SVR(C=5)\n",
    "n = 100\n",
    "scores_a = [kfold_validation(d.Xab[:, :6], d.y, 5, model=svr_model) for _ in range(n)]\n",
    "scores_b = [kfold_validation(d.Xab[:, 6:], d.y, 5, model=svr_model) for _ in range(n)]\n",
    "print('A', sum(scores_a) / n)\n",
    "print('B', sum(scores_b) / n)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 134,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "A 0.477267028884\n",
      "B 0.624468147522\n"
     ]
    }
   ],
   "source": [
    "# Per-domain repeated 5-fold CV with gradient-boosted trees.\n",
    "xgb_model = xgboost.XGBRegressor(max_depth=2, n_estimators=100)\n",
    "n = 100\n",
    "scores_a = [kfold_validation(d.Xab[:, :6], d.y, 5, model=xgb_model) for _ in range(n)]\n",
    "scores_b = [kfold_validation(d.Xab[:, 6:], d.y, 5, model=xgb_model) for _ in range(n)]\n",
    "print('A', sum(scores_a) / n)\n",
    "print('B', sum(scores_b) / n)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 各个经典方法 多次交叉验证 (classic methods, repeated cross-validation)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.51712214582323512"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Repeated 5-fold CV of linear regression on the full A+B feature matrix.\n",
    "lin_model = LinearRegression()\n",
    "n = 100\n",
    "scores = [kfold_validation(d.Xab, d.y, 5, model=lin_model) for _ in range(n)]\n",
    "sum(scores) / n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.53068755958935421"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Repeated 5-fold CV of Ridge regression on the full A+B feature matrix.\n",
    "ridge_model = Ridge(alpha=20)\n",
    "n = 100\n",
    "scores = [kfold_validation(d.Xab, d.y, 5, model=ridge_model) for _ in range(n)]\n",
    "sum(scores) / n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.59894089644331094"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Repeated 5-fold CV of SVR on the full A+B feature matrix.\n",
    "svr_model = SVR(C=5)\n",
    "n = 100\n",
    "scores = [kfold_validation(d.Xab, d.y, 5, model=svr_model) for _ in range(n)]\n",
    "sum(scores) / n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.61903537285025256"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Repeated 5-fold CV of gradient-boosted trees on the full A+B feature matrix.\n",
    "xgb_model = xgboost.XGBRegressor(max_depth=2, n_estimators=150)\n",
    "n = 100\n",
    "scores = [kfold_validation(d.Xab, d.y, 5, model=xgb_model) for _ in range(n)]\n",
    "sum(scores) / n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 各个经典方法 (classic methods, single hold-out split)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.41937493588703961"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Baseline: fit ordinary least squares on the train split, report R^2 on test.\n",
    "lin_model = LinearRegression()\n",
    "lin_model.fit(X_train, y_train)\n",
    "lin_model.score(X_test, y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.462846012697\n",
      "{'alpha': 20}\n"
     ]
    }
   ],
   "source": [
    "# Grid-search the Ridge penalty on train+val, evaluate the refit model on test.\n",
    "alpha_grid = {'alpha': (0, 0.5, 1, 2, 5, 10, 15, 20)}\n",
    "ridge_search = GridSearchCV(Ridge(), alpha_grid)\n",
    "ridge_search.fit(X_trainval, y_trainval)\n",
    "print(ridge_search.score(X_test, y_test))\n",
    "print(ridge_search.best_params_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.510486908968\n",
      "{'C': 5, 'kernel': 'rbf'}\n"
     ]
    }
   ],
   "source": [
    "# Grid-search SVR kernel and C on train+val, evaluate the refit model on test.\n",
    "svr_grid = {'kernel':('linear', 'rbf'), 'C':[1, 5, 10]}\n",
    "svr_search = GridSearchCV(SVR(), svr_grid)\n",
    "svr_search.fit(X_trainval, y_trainval)\n",
    "print(svr_search.score(X_test, y_test))\n",
    "print(svr_search.best_params_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.574354025714\n",
      "{'max_depth': 2, 'n_estimators': 150}\n"
     ]
    }
   ],
   "source": [
    "# Grid-search tree depth and ensemble size on train+val, evaluate on test.\n",
    "xgb_grid = {'max_depth': (2, 3, 4) , 'n_estimators': [10, 50, 100, 150]}\n",
    "xgb_search = GridSearchCV(xgboost.XGBRegressor(), xgb_grid)\n",
    "xgb_search.fit(X_trainval, y_trainval)\n",
    "print(xgb_search.score(X_test, y_test))\n",
    "print(xgb_search.best_params_)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 神经网络方法1 (neural-network method 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from src.models import AWNetwork\n",
    "from src.utils import _T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class AWRegressor(object):\n",
    "    \"\"\"Sklearn-style wrapper around AWNetwork.\n",
    "\n",
    "    Expects the concatenated feature matrix used elsewhere in this notebook:\n",
    "    columns [:6] are domain-A features, columns [6:] are domain-B features.\n",
    "    Keeps a snapshot of the weights that score best on the validation split.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, hiddena=20, hiddenb=20, lr=0.1):\n",
    "        self.hiddena = hiddena\n",
    "        self.hiddenb = hiddenb\n",
    "        self.awnetwork = AWNetwork(hiddena, hiddenb)\n",
    "        # bug fix: was hard-coded to 0.1, silently ignoring the lr argument\n",
    "        self.lr = lr\n",
    "\n",
    "    def fit(self, X, y, X_val=None, y_val=None, verbose=False):\n",
    "        \"\"\"Train for 30 epochs; snapshot the best model on (X_val, y_val).\"\"\"\n",
    "        num_epochs = 30\n",
    "\n",
    "        d_ = PytorchDataset(X[:, :6], X[:, 6:], y)\n",
    "        train_loader = torch.utils.data.DataLoader(dataset=d_, batch_size=32, shuffle=True)\n",
    "        net = self.awnetwork\n",
    "        best_model = AWNetwork(self.hiddena, self.hiddenb)\n",
    "        best_score = None\n",
    "        criterion = nn.MSELoss()\n",
    "        lr = self.lr\n",
    "        # fallback so score() is usable even when no validation set is given\n",
    "        # (previously self.best_model was never set in that case)\n",
    "        self.best_model = net\n",
    "\n",
    "        for epoch in range(num_epochs):\n",
    "            net.train()\n",
    "            # rebuilt each epoch so the (possibly decayed) lr takes effect\n",
    "            optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n",
    "            for i, (Xa, Xb, yb) in enumerate(train_loader):  # yb: was shadowing the y parameter\n",
    "                # Convert torch tensors to Variables\n",
    "                Xa = Variable(Xa)\n",
    "                Xb = Variable(Xb)\n",
    "                yb = Variable(yb)\n",
    "\n",
    "                optimizer.zero_grad()\n",
    "                oa, _, ob, _, o = net(Xa, Xb)\n",
    "                # deep supervision: both sub-networks and the fused output fit y\n",
    "                loss = criterion(oa, yb) + criterion(ob, yb) + criterion(o, yb)\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "            if X_val is not None:\n",
    "                net.eval()\n",
    "                _, _, _, _, pred_val = net(_T(X_val[:, :6]), _T(X_val[:, 6:]))\n",
    "                score = r2_score(y_val, pred_val.data.numpy())\n",
    "                # bug fix: `not best_score` also fired when best_score == 0.0\n",
    "                if best_score is None or score > best_score:\n",
    "                    best_score = score\n",
    "                    best_model.load_state_dict(net.state_dict())\n",
    "                    self.best_model = best_model\n",
    "                else:\n",
    "                    # decay the learning rate when validation stops improving\n",
    "                    lr /= 3.\n",
    "\n",
    "    def score(self, X, y):\n",
    "        \"\"\"R^2 of the best snapshot on (X, y).\n",
    "\n",
    "        Bug fix: previously evaluated the globals X_test/y_test, ignoring\n",
    "        the X and y arguments entirely.\n",
    "        \"\"\"\n",
    "        self.best_model.eval()\n",
    "        _, _, _, _, pred = self.best_model(_T(X[:, :6]), _T(X[:, 6:]))\n",
    "        return r2_score(y, pred.data.numpy())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 149,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.70467192811370738"
      ]
     },
     "execution_count": 149,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Repeat the experiment n times: each round gets a fresh random split AND a\n",
    "# fresh model (previously a single AWRegressor was created once outside do(),\n",
    "# so every round kept training the already-fitted network instead of\n",
    "# starting from scratch).\n",
    "n = 100\n",
    "def do():\n",
    "    rgs = AWRegressor(hiddena=50, hiddenb=40)\n",
    "    X_train, X_val, X_trainval, X_test, y_train, y_val, y_trainval, y_test = \\\n",
    "        utils.make_split(d.Xab, d.y)\n",
    "    rgs.fit(X_train, y_train, X_val, y_val)\n",
    "    return rgs.score(X_test, y_test)\n",
    "sum([do() for i in range(n)]) / n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 神经网络方法2 (neural-network method 2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 150,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "best valid score 0.604386777758\n",
      "best test score 0.603727087112\n"
     ]
    }
   ],
   "source": [
    "from src.models import AWNetworkPlus\n",
    "from src.utils import _T\n",
    "\n",
    "# Method 2: train AWNetworkPlus on the fixed split made at the top of the\n",
    "# notebook, keep the epoch snapshot with the best validation R^2, then\n",
    "# report that snapshot's R^2 on the test split.\n",
    "num_epochs = 100\n",
    "\n",
    "d_ = PytorchDataset(X_train[:, :6], X_train[:, 6:], y_train)\n",
    "train_loader = torch.utils.data.DataLoader(dataset=d_, batch_size=32, shuffle=True)\n",
    "net = AWNetworkPlus()\n",
    "best_model = AWNetworkPlus()\n",
    "best_score = None\n",
    "criterion = nn.MSELoss()  \n",
    "# NOTE(review): unlike method 1, the lr stays fixed at 0.1 for all 100 epochs\n",
    "# (no decay on plateau) -- confirm this is intentional.\n",
    "optimizer = torch.optim.Adam(net.parameters(), lr=0.1)  \n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    net.train()\n",
    "    for i, (Xa, Xb, y) in enumerate(train_loader):  \n",
    "        # Convert torch tensor to Variable\n",
    "        Xa = Variable(Xa)\n",
    "        Xb = Variable(Xb)\n",
    "        y = Variable(y)\n",
    "        \n",
    "        optimizer.zero_grad()\n",
    "        oa, _, ob, _, o = net(Xa, Xb)\n",
    "        # deep supervision: both per-domain outputs and the fused output fit y\n",
    "        loss = criterion(oa, y) + criterion(ob, y) + criterion(o, y)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "    # after each epoch, evaluate on the validation split\n",
    "    net.eval()\n",
    "    _, _, _, _, pred_val = net(_T(X_val[:, :6]), _T(X_val[:, 6:]))\n",
    "    score = r2_score(y_val, pred_val.data.numpy())\n",
    "    # print('end of epoch: {} r2_score on valid set: {}'.format(epoch + 1, score))\n",
    "    # NOTE(review): `not best_score` also triggers when best_score == 0.0;\n",
    "    # `best_score is None` would be the exact check.\n",
    "    if not best_score or score > best_score:\n",
    "        best_score = score\n",
    "        best_model.load_state_dict(net.state_dict())\n",
    "# final evaluation of the best snapshot on the held-out test split\n",
    "best_model.eval()\n",
    "_, _, _, _, pred_test = best_model(_T(X_test[:, :6]), _T(X_test[:, 6:]))\n",
    "score = r2_score(y_test, pred_test.data.numpy())\n",
    "print('best valid score', best_score)\n",
    "print('best test score', score)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
