{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:24:24.347754Z",
     "start_time": "2018-08-28T06:24:24.344746Z"
    }
   },
   "outputs": [],
   "source": [
     "import random as rn\n",
     "import os\n",
     "import numpy as np\n",
     "# Pin all seed sources up front so reruns are reproducible: numpy and the\n",
     "# stdlib random module are seeded explicitly.\n",
     "# NOTE(review): setting PYTHONHASHSEED from inside a running interpreter\n",
     "# does not affect this process's hash randomization -- it only matters for\n",
     "# subprocesses; set it in the environment before launch if hashing must be\n",
     "# deterministic here.\n",
     "os.environ['PYTHONHASHSEED'] = '0'\n",
     "np.random.seed(42)\n",
     "rn.seed(123)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:24:26.147512Z",
     "start_time": "2018-08-28T06:24:24.349760Z"
    }
   },
   "outputs": [],
   "source": [
     "import pandas as pd\n",
     "import pickle\n",
     "from sklearn.linear_model import SGDRegressor\n",
     "from sklearn.model_selection import PredefinedSplit\n",
     "from sklearn.metrics import r2_score\n",
     "from sklearn.model_selection import RandomizedSearchCV\n",
     "from scipy.stats import uniform\n",
     "from scipy.stats import randint as sp_randint\n",
     "from sklearn.metrics import make_scorer\n",
     "# Wider display limits plus fixed 3-decimal float formatting for DataFrames.\n",
     "pd.set_option('display.max_columns', 50)\n",
     "pd.set_option('display.max_rows', 100)\n",
     "pd.set_option('display.float_format', lambda x: '%.3f' % x)\n",
     "# Project root used for all data/output paths below.\n",
     "# NOTE(review): hardcoded absolute Windows path (and the name shadows the\n",
     "# pathlib.Path convention) -- consider a configurable, relative location so\n",
     "# the notebook runs on other machines.\n",
     "Path = 'D:\\\\APViaML'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:24:26.152524Z",
     "start_time": "2018-08-28T06:24:26.149516Z"
    }
   },
   "outputs": [],
   "source": [
    "def get_demo_dict_data():\n",
    "    file = open(Path + '\\\\data\\\\alldata_demo_top1000.pkl','rb')\n",
    "    raw_data = pickle.load(file)\n",
    "    file.close()\n",
    "    return raw_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:24:26.159542Z",
     "start_time": "2018-08-28T06:24:26.154529Z"
    }
   },
   "outputs": [],
   "source": [
    "def Evaluation_fun(predict_array,real_array):\n",
    "    List1 = []\n",
    "    List2 = []\n",
    "    if len(predict_array) != len(real_array):\n",
    "        print('Something is worng!')\n",
    "    else:\n",
    "        for i in range(len(predict_array)):\n",
    "            List1.append(np.square(predict_array[i]-real_array[i]))\n",
    "            List2.append(np.square(real_array[i]))\n",
    "        result = round(100*(1 - sum(List1)/sum(List2)),3)\n",
    "    return result\n",
    "\n",
    "def my_custom_score_func(ground_truth, predictions):\n",
    "    \n",
    "    num1 =sum((ground_truth-predictions)**2)\n",
    "    num2 = sum(ground_truth**2)\n",
    "    return 1-num1/num2\n",
     "\n",
     "# Wrap as an sklearn scorer; greater_is_better=True so RandomizedSearchCV\n",
     "# maximizes this pseudo-R^2.\n",
     "self_score = make_scorer(my_custom_score_func, greater_is_better=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:24:26.171574Z",
     "start_time": "2018-08-28T06:24:26.161548Z"
    },
    "code_folding": []
   },
   "outputs": [],
   "source": [
     "def rolling_model_OLS3_annual(\n",
     "    df_X ,\n",
     "    df_Y ,\n",
     "    num1 = 200):\n",
     "    \"\"\"Expanding-window annual model: tune a huber-loss SGDRegressor per\n",
     "    window via randomized search, then predict one held-out test year.\n",
     "\n",
     "    For each i in 0..29:\n",
     "      * training window  : '1958' .. str(1987 + i)\n",
     "      * validation window: str(1976 + i) .. str(1987 + i) -- the trailing\n",
     "        12 years of the training window, marked via PredefinedSplit\n",
     "      * test year        : str(1988 + i)\n",
     "\n",
     "    Parameters\n",
     "    ----------\n",
     "    df_X : feature DataFrame indexed so year-string .loc slicing works\n",
     "        (presumably a DatetimeIndex -- TODO confirm against the data file).\n",
     "    df_Y : target with the same index as df_X.\n",
     "    num1 : number of parameter settings sampled by RandomizedSearchCV.\n",
     "\n",
     "    Returns\n",
     "    -------\n",
     "    (y_predict_list, test_performance_score_list) : pooled per-observation\n",
     "    predictions across all 30 test years, and the per-year Evaluation_fun\n",
     "    score (percent).\n",
     "    \"\"\"\n",
     "                                    \n",
     "    y_predict_list = []\n",
     "\n",
     "    # NOTE(review): v_performance_score_list is never appended to or\n",
     "    # returned below; it looks like leftover scaffolding.\n",
     "    v_performance_score_list = []\n",
     "    test_performance_score_list = []\n",
     "\n",
     "    for i in range(30):\n",
     "        print(i)\n",
     "        ## define data index (year strings used for .loc slicing)\n",
     "        traindata_startyear_str = str(1958) \n",
     "        traindata_endyear_str = str(i + 1987) \n",
     "        vdata_startyear_str = str(i + 1976) \n",
     "        vdata_endyear_str = str(i + 1987) \n",
     "        testdata_startyear_str = str(i + 1988) \n",
     " \n",
     "\n",
     "        ## get data     \n",
     "        X_traindata =  np.array(df_X.loc[traindata_startyear_str:traindata_endyear_str])\n",
     "        Y_traindata = np.array(df_Y.loc[traindata_startyear_str:traindata_endyear_str])\n",
     "        X_vdata = np.array(df_X.loc[vdata_startyear_str:vdata_endyear_str])\n",
     "        Y_vdata = np.array(df_Y.loc[vdata_startyear_str:vdata_endyear_str])\n",
     "        X_testdata = np.array(df_X.loc[testdata_startyear_str])\n",
     "        Y_testdata = np.array(df_Y.loc[testdata_startyear_str])\n",
     "\n",
     "        # Rows before num_valid_size are pre-validation training rows; the\n",
     "        # trailing len(X_vdata) rows form the single validation fold.\n",
     "        num_valid_size = len(X_traindata)-len(X_vdata)\n",
     "        \n",
     "        # PredefinedSplit convention: -1 = always in training, 0 = fold 0\n",
     "        # (the one validation split used by the search).\n",
     "        test_fold = -1 * np.ones(len(X_traindata))\n",
     "        test_fold[num_valid_size:] = 0\n",
     "        ps = PredefinedSplit(test_fold)\n",
     "        \n",
     "        \n",
     "        # specify parameters and distributions to sample from\n",
     "        param_dist = {'alpha': uniform(0.00001, 0.1),\n",
     "                      'power_t':uniform(0.1, 0.9),\n",
     "                     'eta0':uniform(0.00001, 0.1),\n",
     "                     'epsilon':uniform(0.01, 0.99),\n",
     "                     'max_iter':sp_randint(1000, len(X_traindata)),\n",
     "                     'tol':[0.01,0.001,0.0001,0.00001],\n",
     "                     'fit_intercept':[True,False]}\n",
     "        \n",
     "        # shuffle=False keeps the rows in time order; penalty='none' means\n",
     "        # no regularization penalty on the huber-loss SGD fit.\n",
     "        clf = SGDRegressor(shuffle =False,\n",
     "                 loss = 'huber',\n",
     "                 penalty= 'none',random_state=100)\n",
     "        \n",
     "        # run randomized search over the single predefined train/valid split.\n",
     "        # NOTE(review): the iid= argument was removed in scikit-learn 0.24;\n",
     "        # this cell requires an older sklearn (notebook ran on Python 3.6).\n",
     "        # cv=ps.split() passes a generator, which is fine for this single\n",
     "        # .fit() call but cannot be reused afterwards.\n",
     "        n_iter_search = num1\n",
     "        estim = RandomizedSearchCV(clf, param_distributions=param_dist,\n",
     "                                           n_iter=n_iter_search,scoring=self_score,\n",
     "                                          cv=ps.split(),iid=False,random_state=100)\n",
     "                        \n",
     "        estim.fit(X_traindata, Y_traindata)\n",
     "        best_estimator = estim.best_estimator_\n",
     "        # Refit the tuned model on the pre-validation rows only.\n",
     "        # NOTE(review): this excludes the validation years from the final\n",
     "        # fit -- confirm that is the intended design rather than refitting\n",
     "        # on the full training window.\n",
     "        estim = best_estimator.fit(X_traindata[:num_valid_size], Y_traindata[:num_valid_size])\n",
     "\n",
     "        \n",
     "        ## model testing: predict the held-out year and score it\n",
     "         \n",
     "        test_pre_y_arry = estim.predict(X_testdata)\n",
     "        y_predict_list1=[]\n",
     "        for x in test_pre_y_arry:\n",
     "            y_predict_list.append(x)\n",
     "            y_predict_list1.append(x)        \n",
     "        test_performance_score =  Evaluation_fun(y_predict_list1,Y_testdata )\n",
     "        test_performance_score_list.append(test_performance_score)\n",
     "\n",
     "    return y_predict_list,test_performance_score_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:24:26.697974Z",
     "start_time": "2018-08-28T06:24:26.173580Z"
    }
   },
   "outputs": [],
   "source": [
     "# Unpack features and targets for the top-1000 universe, then drop the dict\n",
     "# to free kernel memory.\n",
     "data = get_demo_dict_data()\n",
     "top_1000_data_X = data['X']\n",
     "top_1000_data_Y = data['Y']\n",
     "del data\n",
     "\n",
     "# Keep only the three factors: mve, bm, mom12m -- presumably size,\n",
     "# book-to-market and 12-month momentum; confirm column meanings against the\n",
     "# data source.\n",
     "X_3factor_list = ['mve','bm','mom12m']\n",
     "top_1000_data_X = top_1000_data_X.loc[:,X_3factor_list ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:27:28.814995Z",
     "start_time": "2018-08-28T06:24:26.698977Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "1\n",
      "2\n",
      "3\n",
      "4\n",
      "5\n",
      "6\n",
      "7\n",
      "8\n",
      "9\n",
      "10\n",
      "11\n",
      "12\n",
      "13\n",
      "14\n",
      "15\n",
      "16\n",
      "17\n",
      "18\n",
      "19\n",
      "20\n",
      "21\n",
      "22\n",
      "23\n",
      "24\n",
      "25\n",
      "26\n",
      "27\n",
      "28\n",
      "29\n"
     ]
    }
   ],
   "source": [
     "# Fit the 30 expanding windows (prints iteration index 0-29) and collect\n",
     "# pooled predictions plus the per-test-year score.\n",
     "y_predict_list,test_performance_score_list = rolling_model_OLS3_annual(df_X = top_1000_data_X,\n",
     "                                                                       df_Y = top_1000_data_Y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:27:29.026555Z",
     "start_time": "2018-08-28T06:27:28.816999Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3.928"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Pooled out-of-sample score (percent) across every test year from 1988 on;\n",
     "# relies on y_predict_list being ordered the same way as df_Y sliced by year.\n",
     "y_real = np.array(top_1000_data_Y.loc['1988':])\n",
     "Evaluation_fun(y_predict_list,y_real)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:27:29.144870Z",
     "start_time": "2018-08-28T06:27:29.029563Z"
    }
   },
   "outputs": [],
   "source": [
    "file = open(Path + '\\\\output\\\\data\\\\Model_OLS3_Top1000_Prediction.pkl', 'wb')\n",
    "pickle.dump(y_predict_list, file)\n",
    "file.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T06:27:29.151888Z",
     "start_time": "2018-08-28T06:27:29.146875Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[17.228,\n",
       " 13.275,\n",
       " -35.935,\n",
       " 14.233,\n",
       " 7.984,\n",
       " 8.957,\n",
       " -33.215,\n",
       " 19.561,\n",
       " 14.186,\n",
       " 18.297,\n",
       " 1.368,\n",
       " 1.633,\n",
       " -3.667,\n",
       " -22.298,\n",
       " -32.718,\n",
       " 22.89,\n",
       " 16.914,\n",
       " 8.704,\n",
       " 16.862,\n",
       " -0.183,\n",
       " -32.79,\n",
       " 10.177,\n",
       " 17.607,\n",
       " -10.556,\n",
       " 14.641,\n",
       " 14.673,\n",
       " 13.668,\n",
       " -35.111,\n",
       " 19.584,\n",
       " 20.096]"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Per-test-year scores (percent). Negative years mean the model did worse\n",
     "# than an all-zero forecast in that year (denominator is not mean-centered).\n",
     "test_performance_score_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autoclose": false,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "position": {
    "height": "451.2px",
    "left": "968px",
    "right": "20px",
    "top": "129px",
    "width": "513.8px"
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
