{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-29T05:30:22.888089Z",
     "start_time": "2018-08-29T05:30:22.881071Z"
    }
   },
   "outputs": [],
   "source": [
     "# Reproducibility setup: fix all random seeds before any stochastic work.\n",
     "import random as rn\n",
     "import os\n",
     "import numpy as np\n",
     "# NOTE(review): setting PYTHONHASHSEED from inside a running interpreter\n",
     "# does not affect hash randomization for this process - it must be set\n",
     "# before the kernel starts. Confirm whether hash determinism is needed.\n",
     "os.environ['PYTHONHASHSEED'] = '0'\n",
     "np.random.seed(42)  # seed numpy's legacy global RNG\n",
     "rn.seed(123)  # seed Python's stdlib RNG"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-29T05:30:53.949350Z",
     "start_time": "2018-08-29T05:30:53.944336Z"
    }
   },
   "outputs": [],
   "source": [
     "# Third-party imports plus pandas display and project-path configuration.\n",
     "import pandas as pd\n",
     "import pickle\n",
     "from sklearn.linear_model import SGDRegressor\n",
     "from sklearn.model_selection import PredefinedSplit\n",
     "from sklearn.metrics import r2_score\n",
     "from sklearn.model_selection import RandomizedSearchCV\n",
     "from scipy.stats import uniform\n",
     "from scipy.stats import randint as sp_randint\n",
     "from sklearn.metrics import make_scorer\n",
     "from sklearn.decomposition import PCA\n",
     "pd.set_option('display.max_columns', 50)\n",
     "pd.set_option('display.max_rows', 100)\n",
     "pd.set_option('display.float_format', lambda x: '%.3f' % x)\n",
     "# NOTE(review): hardcoded absolute Windows path; 'Path' also collides with\n",
     "# the conventional pathlib.Path name - consider a configurable data dir.\n",
     "Path = 'D:\\\\APViaML'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-29T05:30:24.228218Z",
     "start_time": "2018-08-29T05:30:24.223705Z"
    }
   },
   "outputs": [],
   "source": [
    "def get_demo_dict_data():\n",
    "    file = open(Path + '\\\\data\\\\alldata_demo_top1000.pkl','rb')\n",
    "    raw_data = pickle.load(file)\n",
    "    file.close()\n",
    "    return raw_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-29T05:30:24.547577Z",
     "start_time": "2018-08-29T05:30:24.541561Z"
    }
   },
   "outputs": [],
   "source": [
    "def Evaluation_fun(predict_array,real_array):\n",
    "    List1 = []\n",
    "    List2 = []\n",
    "    if len(predict_array) != len(real_array):\n",
    "        print('Something is worng!')\n",
    "    else:\n",
    "        for i in range(len(predict_array)):\n",
    "            List1.append(np.square(predict_array[i]-real_array[i]))\n",
    "            List2.append(np.square(real_array[i]))\n",
    "        result = round(100*(1 - sum(List1)/sum(List2)),3)\n",
    "    return result\n",
    "\n",
    "def my_custom_score_func(ground_truth, predictions):\n",
    "    \n",
    "    num1 =sum((ground_truth-predictions)**2)\n",
    "    num2 = sum(ground_truth**2)\n",
    "    return 1-num1/num2\n",
    "\n",
    "self_score = make_scorer(my_custom_score_func, greater_is_better=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-29T05:40:31.170458Z",
     "start_time": "2018-08-29T05:40:31.151408Z"
    },
    "code_folding": []
   },
   "outputs": [],
   "source": [
    "def rolling_model_PCA_annual(\n",
    "    df_X ,\n",
    "    df_Y ,\n",
    "    num1 = 100):\n",
    "                                    \n",
    "    y_predict_list = []\n",
    "    num_comp_list = []\n",
    "    test_performance_score_list = []\n",
    "\n",
    "    for i in range(30):\n",
    "        print(i)\n",
    "        ## define data index\n",
    "        traindata_startyear_str = str(1958) \n",
    "        traindata_endyear_str = str(i + 1987) \n",
    "        vdata_startyear_str = str(i + 1976) \n",
    "        vdata_endyear_str = str(i + 1987) \n",
    "        testdata_startyear_str = str(i + 1988) \n",
    " \n",
    "\n",
    "        ## get data     \n",
    "        X_traindata =  np.array(df_X.loc[traindata_startyear_str:traindata_endyear_str])\n",
    "        Y_traindata = np.array(df_Y.loc[traindata_startyear_str:traindata_endyear_str])\n",
    "        X_vdata = np.array(df_X.loc[vdata_startyear_str:vdata_endyear_str])\n",
    "        Y_vdata = np.array(df_Y.loc[vdata_startyear_str:vdata_endyear_str])\n",
    "        X_testdata = np.array(df_X.loc[testdata_startyear_str])\n",
    "        Y_testdata = np.array(df_Y.loc[testdata_startyear_str])\n",
    "\n",
    "        num_valid_size = len(X_traindata)-len(X_vdata)\n",
    "        \n",
    "        test_fold = -1 * np.ones(len(X_traindata))\n",
    "        test_fold[num_valid_size:] = 0\n",
    "        ps = PredefinedSplit(test_fold)\n",
    "\n",
    "                ## model setting\n",
    "        v_score = pd.DataFrame(index=range(3,80,1),columns=['v_score','test_score'])\n",
    "        v_score['num'] = range(3,80,1)\n",
    "        v_temp_list =[]\n",
    "        t_temp_list =[]\n",
    "        model_list = []\n",
    "        for j in range(3,80,1):\n",
    "            i = j-3\n",
    "            predict_array = np.zeros(shape=(77,len(Y_testdata)))\n",
    "            \n",
    "            pca = PCA(n_components=j)\n",
    "            param_dist = {\n",
    "              'power_t':uniform(0.1, 0.9),\n",
    "             'eta0':uniform(0.00001, 0.1),\n",
    "             'max_iter':sp_randint(1000, len(X_traindata)),\n",
    "             'tol':[0.001,0.0001,0.00001]}\n",
    "                \n",
    "            clf = SGDRegressor(shuffle =False,random_state=100,\n",
    "                         penalty= 'none',learning_rate='invscaling')\n",
    "            n_iter_search = num1\n",
    "            estim = RandomizedSearchCV(clf, param_distributions=param_dist,\n",
    "                                   n_iter=n_iter_search,scoring=self_score,\n",
    "                                  cv=ps.split(),iid=False,n_jobs=1)\n",
    "    \n",
    "        ## model fitting\n",
    "            X_reduced_train = pca.fit_transform(X_traindata)\n",
    "            ## search para\n",
    "            estim.fit(X_reduced_train[:,:j],Y_traindata)\n",
    "            #refit best model & fit\n",
    "            best_estimator = estim.best_estimator_\n",
    "            estim = best_estimator.fit(X_traindata[:num_valid_size,:j], Y_traindata[:num_valid_size])     \n",
    "            \n",
    "            model_list.append(estim)\n",
    "            #data validation\n",
    "            X_reduced_v = pca.fit_transform(X_vdata)\n",
    "            v_pre_y_array = estim.predict(X_reduced_v[:,:j])\n",
    "            \n",
    "            v_performance_score = Evaluation_fun(v_pre_y_array,Y_vdata)\n",
    "            v_temp_list.append(v_performance_score)\n",
    "            \n",
    "            #data test infomation\n",
    "            X_reduced_test = pca.fit_transform(X_testdata)\n",
    "            test_pre_y_array = estim.predict(X_reduced_test[:,:j])\n",
    "            predict_array[i]=test_pre_y_array\n",
    "            test_performance_score = Evaluation_fun(test_pre_y_array,Y_testdata)\n",
    "            t_temp_list.append(test_performance_score)            \n",
    "            \n",
    "            \n",
    "        v_score['v_score'] = v_temp_list\n",
    "        v_score['test_score'] = t_temp_list\n",
    "        v_score.sort_values(by='v_score',inplace=True,ascending=False)\n",
    "        best_num = v_score.iloc[0,2]\n",
    "            \n",
    "        ##store the best num\n",
    "        num_comp_list.append(best_num)  \n",
    "        num2 = best_num - 3\n",
    "        best_predict_array = predict_array[num2]\n",
    "        test_performance_score_list.append(v_score.iloc[0,1])\n",
    "        for x in best_predict_array:\n",
    "            y_predict_list.append(x)        \n",
    "\n",
    "        best_estim = model_list[num2]\n",
    "        #save best model \n",
    "        file = open(Path + '\\\\model\\\\PCR\\\\' + testdata_startyear_str+ 'Model_PCR_Top1000_Prediction.pkl', 'wb')\n",
    "        pickle.dump(best_estim, file)\n",
    "        file.close()\n",
    "        \n",
    "    return y_predict_list,test_performance_score_list,num_comp_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-29T05:30:27.927909Z",
     "start_time": "2018-08-29T05:30:27.415043Z"
    }
   },
   "outputs": [],
   "source": [
    "data = get_demo_dict_data()\n",
    "top_1000_data_X = data['X']\n",
    "top_1000_data_Y = data['Y']\n",
    "del data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-29T06:09:16.126449Z",
     "start_time": "2018-08-29T05:40:40.592040Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n"
     ]
    },
    {
     "ename": "IndexError",
     "evalue": "list index out of range",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mIndexError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-14-93a6ab14dc9f>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m      1\u001b[0m y_predict_list,test_performance_score_list = rolling_model_PCA_annual(df_X = top_1000_data_X,\n\u001b[1;32m----> 2\u001b[1;33m                                                                        df_Y = top_1000_data_Y)\n\u001b[0m",
      "\u001b[1;32m<ipython-input-13-b191db501cd1>\u001b[0m in \u001b[0;36mrolling_model_PCA_annual\u001b[1;34m(df_X, df_Y, num1)\u001b[0m\n\u001b[0;32m     93\u001b[0m             \u001b[0my_predict_list\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     94\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 95\u001b[1;33m         \u001b[0mbest_estim\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodel_list\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mnum2\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     96\u001b[0m         \u001b[1;31m#save best model\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     97\u001b[0m         \u001b[0mfile\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mPath\u001b[0m \u001b[1;33m+\u001b[0m \u001b[1;34m'\\\\model\\\\PCR\\\\'\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mtestdata_startyear_str\u001b[0m\u001b[1;33m+\u001b[0m \u001b[1;34m'Model_PCR_Top1000_Prediction.pkl'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'wb'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mIndexError\u001b[0m: list index out of range"
     ]
    }
   ],
   "source": [
    "y_predict_list,test_performance_score_list = rolling_model_PCA_annual(df_X = top_1000_data_X,\n",
    "                                                                       df_Y = top_1000_data_Y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-29T01:20:37.216319Z",
     "start_time": "2018-08-29T01:20:37.056891Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2.792"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y_real = np.array(top_1000_data_Y.loc['1988':])\n",
    "Evaluation_fun(y_predict_list,y_real)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-08-28T14:25:07.968788Z",
     "start_time": "2018-08-28T14:25:07.875532Z"
    }
   },
   "outputs": [],
   "source": [
    "file = open(Path + '\\\\output\\\\data\\\\Model_OLS_Top1000_Prediction.pkl', 'wb')\n",
    "pickle.dump(y_predict_list, file)\n",
    "file.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autoclose": false,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
