{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Reproducibility setup: seed Python, NumPy and TensorFlow (TF 1.x API) ---\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import random as rn\n",
    "\n",
    "import os\n",
    "# Pin the hash seed so any hash-based ordering is repeatable across runs\n",
    "os.environ['PYTHONHASHSEED'] = '0'\n",
    "np.random.seed(42)\n",
    "rn.seed(12345)\n",
    "# Single-threaded intra/inter op execution makes TF results deterministic\n",
    "session_conf = tf.ConfigProto(device_count={\"CPU\": 4},intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n",
    "\n",
    "from keras import backend as K\n",
    "\n",
    "\n",
    "tf.set_random_seed(1234)\n",
    "# Install the seeded, single-threaded session as the Keras backend session\n",
    "sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n",
    "K.set_session(sess)\n",
    "\n",
    "\n",
    "import pandas as pd\n",
    "\n",
    "import pickle\n",
    "from sklearn.model_selection import PredefinedSplit\n",
    "\n",
    "from keras.models import Sequential\n",
    "from keras.layers.core import Dense, Dropout, Activation\n",
    "\n",
    "from keras.layers.normalization import BatchNormalization\n",
    "from keras.optimizers import Adam\n",
    "from keras.callbacks import EarlyStopping\n",
    "from keras import regularizers\n",
    "from keras import initializers\n",
    "from sklearn.metrics import mean_squared_error\n",
    "# Project root for data and model artifacts (hard-coded Windows path)\n",
    "Path = 'D:\\\\APViaML'\n",
    "from keras.callbacks import ModelCheckpoint\n",
    "from keras.models import load_model\n",
    "\n",
    "from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n",
    "# pandas display options for interactive inspection\n",
    "pd.set_option('display.max_columns', 50)\n",
    "pd.set_option('display.max_rows', 100)\n",
    "pd.set_option('display.float_format', lambda x: '%.3f' % x)\n",
    "\n",
    "\n",
    "def get_demo_dict_data():\n",
    "    \"\"\"Load the pickled Top-1000 demo data set from <Path>/data.\n",
    "\n",
    "    Returns the unpickled object (used below as a dict with 'X' and 'Y').\n",
    "    \"\"\"\n",
    "    # FIX: the original open()/close() pair leaked the file handle when\n",
    "    # pickle.load raised; a context manager closes it on every path.\n",
    "    # NOTE(review): pickle.load runs arbitrary code -- only load trusted files.\n",
    "    with open(Path + '\\\\data\\\\alldata_demo_top1000.pkl', 'rb') as fh:\n",
    "        raw_data = pickle.load(fh)\n",
    "    return raw_data\n",
    "\n",
    "# Load the demo dict once at module level and keep only the X/Y members\n",
    "data = get_demo_dict_data()\n",
    "\n",
    "top_1000_data_X = data['X']\n",
    "top_1000_data_Y = data['Y']\n",
    "# drop the containing dict; the two objects above keep their own references\n",
    "del data\n",
    "\n",
    "def creat_data(num,df_X=top_1000_data_X,df_Y=top_1000_data_Y):\n",
    "    '''\n",
    "    Data providing function:\n",
    "\n",
    "    This function is separated from model() so that hyperopt\n",
    "    won't reload data for each evaluation run.\n",
    "\n",
    "    num   -- integer year offset; rolls all three windows forward together.\n",
    "    df_X, df_Y -- feature/target frames sliced with .loc on year strings\n",
    "                  (presumably a year-labelled or datetime index -- verify).\n",
    "\n",
    "    Returns six numpy arrays:\n",
    "      train X/Y      : 1958 .. num+1987 (expanding window)\n",
    "      validation X/Y : num+1976 .. num+1987, i.e. the last 12 years of the\n",
    "                       training span -- NOTE(review): validation overlaps\n",
    "                       the training window; confirm this is intentional.\n",
    "      test X/Y       : the single year num+1988\n",
    "    '''\n",
    "    traindata_startyear_str = str(1958) \n",
    "    traindata_endyear_str = str(num + 1987) \n",
    "    vdata_startyear_str = str(num + 1976) \n",
    "    vdata_endyear_str = str(num + 1987) \n",
    "    testdata_startyear_str = str(num + 1988) \n",
    "  \n",
    "    # .loc with string labels slices inclusively on both ends\n",
    "    X_traindata =  np.array(df_X.loc[traindata_startyear_str:traindata_endyear_str])\n",
    "    Y_traindata = np.array(df_Y.loc[traindata_startyear_str:traindata_endyear_str])\n",
    "    X_vdata = np.array(df_X.loc[vdata_startyear_str:vdata_endyear_str])\n",
    "    Y_vdata = np.array(df_Y.loc[vdata_startyear_str:vdata_endyear_str])\n",
    "    X_testdata = np.array(df_X.loc[testdata_startyear_str])\n",
    "    Y_testdata = np.array(df_Y.loc[testdata_startyear_str])\n",
    "        \n",
    "    return X_traindata, Y_traindata, X_vdata, Y_vdata, X_testdata, Y_testdata\n",
    "\n",
    "\n",
    "def Evaluation_fun(predict_array,real_array):\n",
    "    \"\"\"Score predictions as a percentage: 100 * (1 - SSE / sum(real^2)).\n",
    "\n",
    "    An out-of-sample R^2-style measure with a zero benchmark (denominator\n",
    "    is the sum of squared actuals, not deviations from their mean).\n",
    "\n",
    "    predict_array, real_array -- equal-length 1-D numeric sequences.\n",
    "    Returns the score rounded to 3 decimals.\n",
    "    Raises ValueError when the lengths differ.  (The original printed a\n",
    "    typo'd message and then crashed with UnboundLocalError on `result`.)\n",
    "    \"\"\"\n",
    "    predict = np.asarray(predict_array, dtype=float)\n",
    "    real = np.asarray(real_array, dtype=float)\n",
    "    if len(predict) != len(real):\n",
    "        raise ValueError('predict_array and real_array must have the same length')\n",
    "    # Vectorized replacement for the original element-by-element loop\n",
    "    squared_error = float(np.sum(np.square(predict - real)))\n",
    "    squared_real = float(np.sum(np.square(real)))\n",
    "    return round(100*(1 - squared_error/squared_real),3)\n",
    "\n",
    "# Define the hyperopt (TPE) search space over training hyper-parameters\n",
    "space = {'ll_float':hp.uniform('ll_float',0.01,0.2),\n",
    "         'lr': hp.loguniform('lr',np.log(0.005),np.log(0.2)),\n",
    "         'beta_1_float':hp.uniform('beta_1_float',0.8,0.95),\n",
    "         'beta_2_float':hp.uniform('beta_2_float',0.98,0.9999),\n",
    "         'epsilon_float':hp.uniform('epsilon_float',1e-09,1e-07), ## NOTE(review): uniform (not loguniform) over two decades -- confirm intended\n",
    "         'batch_size': hp.quniform('batch_size',10,500,1),\n",
    "         'epochs': hp.quniform('epochs',20,50,1)\n",
    "         }\n",
    "    \n",
    "## number of hyperopt evaluations (trials)\n",
    "## NOTE(review): original comment said \"when set 50, something will be wrong\" -- verify\n",
    "try_num1 = int(50)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# window offset: i=10 trains through 1997 and tests on year 1998 (see creat_data)\n",
    "i=10\n",
    "print(i)\n",
    "    # split data into train / validation / test arrays for this window\n",
    "X_traindata, Y_traindata, X_vdata, Y_vdata, X_testdata, Y_testdata = creat_data(num=i)\n",
    "\n",
    "#define NN1\n",
    "def f_NN1(params):\n",
    "    \"\"\"Hyperopt objective: build and train NN1, score it on validation data.\n",
    "\n",
    "    params -- dict sampled from `space`: l1 strength, Adam settings,\n",
    "              batch size and epoch count.\n",
    "    Reads the module-level X/Y train, validation and test arrays.\n",
    "    Returns the hyperopt result dict: negated validation score as 'loss',\n",
    "    plus test-year predictions, test score and the best checkpointed model.\n",
    "    \"\"\"\n",
    "    ## unpack hyper-parameters\n",
    "    ll_float= params[\"ll_float\"]             # l1 regularization, e.g. 0.1\n",
    "    learn_rate_float= params[\"lr\"]           # e.g. 0.01\n",
    "    beta_1_float= params[\"beta_1_float\"]     # e.g. 0.9\n",
    "    beta_2_float= params[\"beta_2_float\"]     # e.g. 0.999\n",
    "    epsilon_float= params[\"epsilon_float\"]   # e.g. 1e-08\n",
    "    batch_size_num = params['batch_size']\n",
    "    epochs_num = params['epochs']            # e.g. 50\n",
    "\n",
    "    ## model structure: Dense 32 -> 16 -> 1, ReLU + BatchNorm on hidden layers\n",
    "    model_NN1 = Sequential()\n",
    "    init = initializers.he_normal(seed=100)\n",
    "    model_NN1.add(Dense(32, input_dim =len(X_traindata[0]),\n",
    "                        kernel_initializer=init ,\n",
    "                        kernel_regularizer=regularizers.l1(ll_float)))\n",
    "    model_NN1.add(Activation(\"relu\"))\n",
    "    model_NN1.add(BatchNormalization())\n",
    "    # BUG FIX: the next four layers were added to an undefined `model_NN2`,\n",
    "    # which raised a NameError at run time; they belong on model_NN1.\n",
    "    model_NN1.add(Dense(16, \n",
    "                        kernel_initializer=init ,\n",
    "                        kernel_regularizer=regularizers.l1(ll_float)))\n",
    "    model_NN1.add(Activation(\"relu\"))\n",
    "    model_NN1.add(BatchNormalization())\n",
    "    model_NN1.add(Dense(1))\n",
    "    \n",
    "    ## compile model\n",
    "    adam=Adam(lr=learn_rate_float, beta_1=beta_1_float, beta_2=beta_2_float, epsilon=epsilon_float)\n",
    "    model_NN1.compile(loss='mse', optimizer=adam,metrics=['mse'])\n",
    "    \n",
    "    ## callbacks: stop early on flat val_loss, checkpoint the best model to disk\n",
    "    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=0, mode='auto')\n",
    "    model_filepath = Path + '\\\\model\\\\NN1\\\\temp\\\\best_weights.h5'\n",
    "    # FIX: save_best_only takes a bool; the original passed the string 'True',\n",
    "    # which only behaved correctly because any non-empty string is truthy.\n",
    "    checkpoint = ModelCheckpoint(filepath=model_filepath,save_weights_only=False,monitor='val_loss',mode='min' ,save_best_only=True)\n",
    "    callback_lists = [early_stopping,checkpoint]\n",
    "    \n",
    "    ## fit model (shuffle=False keeps runs reproducible with the fixed seeds)\n",
    "    model_NN1.fit(X_traindata, Y_traindata,\n",
    "              batch_size = int(batch_size_num) ,\n",
    "              epochs = int(epochs_num),\n",
    "              verbose = 0,\n",
    "              validation_data=(X_vdata, Y_vdata),\n",
    "              callbacks=callback_lists ,\n",
    "              shuffle=False)\n",
    "\n",
    "    ## reload the checkpointed best model and score it on the validation set\n",
    "    best_model = load_model(model_filepath)\n",
    "    Y_pre_v = best_model.predict(X_vdata,verbose = 0)\n",
    "\n",
    "    # flatten the (n, 1) prediction column into a plain list\n",
    "    Y_pre_vlist=[]\n",
    "    for x in Y_pre_v[:,0]:\n",
    "        Y_pre_vlist.append(x)\n",
    "\n",
    "    v_score = Evaluation_fun(Y_pre_vlist, Y_vdata)\n",
    "\n",
    "    ## predict the held-out test year and score it\n",
    "    Y_pre =best_model.predict(X_testdata,verbose = 1)\n",
    "    \n",
    "    Y_pre_list=[]\n",
    "    for x in Y_pre[:,0]:\n",
    "        Y_pre_list.append(x)\n",
    "    test_score = Evaluation_fun(Y_pre_list, Y_testdata)\n",
    "    # hyperopt minimizes, so negate the validation score\n",
    "    return {'loss': -v_score , 'status': STATUS_OK, \n",
    "            'y_pre_list':Y_pre_list,'test_score':test_score,\n",
    "            'models':best_model}\n",
    "\n",
    "# Run the TPE search: each evaluation of f_NN1 trains one model\n",
    "trials = Trials()\n",
    "fmin(f_NN1, space, algo=tpe.suggest, max_evals=try_num1, trials=trials)\n",
    "\n",
    "# Locate the trial with the smallest loss.  The loop does not break, so on\n",
    "# ties it keeps the LAST index that attains the minimum.\n",
    "# NOTE(review): trials.losses() may contain None for failed trials, which\n",
    "# would make min() raise on Python 3 -- verify all trials succeed.\n",
    "loss_list = trials.losses()\n",
    "min_loss = min(loss_list)\n",
    "for k in range(try_num1):\n",
    "    if min_loss == loss_list[k]:\n",
    "        key = k\n",
    "best_results = trials.results[key]\n",
    "\n",
    "\n",
    "# Persist the winning model, named after the test year (i + 1988)\n",
    "final_model =  best_results['models']\n",
    "final_model.save(Path + '\\\\model\\\\NN1\\\\'+ str(i+1988)+'_Model_NN1_Top1000_Prediction.h5')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autoclose": false,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
