{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deep-learning stack (Keras via TensorFlow) and the usual analysis libraries.\n",
    "import tensorflow.keras as keras\n",
    "import tensorflow.keras.layers as layers\n",
    "import numpy as np\n",
    "\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import sys\n",
    "import seaborn as sns\n",
    "import warnings\n",
    "\n",
    "from sklearn.model_selection import KFold, cross_val_score as CVS, train_test_split as TTS\n",
    "from sklearn.metrics import mean_squared_error as MSE\n",
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import mean_absolute_error  # MAE\n",
    "from sklearn.metrics import r2_score  # R-squared\n",
    "\n",
    "# Silence library warnings so training logs below stay readable.\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese axis labels correctly\n",
    "plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly under SimHei"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the cleaned 325-sample dataset; drop identifier/time columns and the\n",
    "# product-property columns that are not used as features here.\n",
    "data_325 = pd.read_excel(\"3-数据清洗后325样本数据.xlsx\", na_values=np.nan)\n",
    "data_325 = data_325.drop(columns=['样本编号', '时间', '产品性质：硫含量', '产品性质：辛烷值'])\n",
    "# Regression target: RON loss (column stays in data_325 as well).\n",
    "label = data_325['RON损失']\n",
    "# Pairwise Pearson correlations among the remaining columns.\n",
    "data_325_corr = data_325.corr()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "corr_list = dict(data_325_corr.iloc[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'RON损失': 1.0,\n",
       " '原料性质：硫含量': -0.15492341987801606,\n",
       " '原料性质：辛烷值': -0.03659090253178292,\n",
       " '原料性质：饱和烃': 0.22786101194372982,\n",
       " '原料性质：烯烃': -0.18765934537045034,\n",
       " '原料性质：芳烃': -0.0608612182274977,\n",
       " '原料性质：溴值': 0.11671199874766362,\n",
       " '原料性质：密度': -0.10669343144138563,\n",
       " '待生吸附剂性质：焦炭': -0.15961888365506713,\n",
       " '待生吸附剂性质：S': 0.00927577410859538,\n",
       " '再生吸附剂性质：焦炭': -0.05672641807456995,\n",
       " '再生吸附剂性质：S': -0.05875790400741909,\n",
       " 'S-ZORB.CAL_H2.PV': 0.2901741749774067,\n",
       " 'S-ZORB.PDI_2102.PV': 0.25692784609321867,\n",
       " 'S-ZORB.PT_2801.PV': 0.014502299816070199,\n",
       " 'S-ZORB.FC_2801.PV': -0.24821297310380552,\n",
       " 'S-ZORB.TE_2103.PV': 0.1269082701573267,\n",
       " 'S-ZORB.TE_2005.PV': -0.0027367705819765996,\n",
       " 'S-ZORB.PT_2101.PV': -0.06521102562351905,\n",
       " 'S-ZORB.PDT_2104.PV': 0.3194412459848149,\n",
       " 'S-ZORB.TE_2301.PV': 0.22632756730113604,\n",
       " 'S-ZORB.PT_2301.PV': -0.06622225030936965,\n",
       " 'S-ZORB.PC_2105.PV': -0.1879214427968404,\n",
       " 'S-ZORB.PC_5101.PV': 0.09807280475489501,\n",
       " 'S-ZORB.TC_5005.PV': 0.02937200673596701,\n",
       " 'S-ZORB.LC_5001.PV': 0.2958247677443841,\n",
       " 'S-ZORB.LC_5101.PV': -0.337434141983431,\n",
       " 'S-ZORB.TE_5102.PV': 0.019819734158371946,\n",
       " 'S-ZORB.TE_5202.PV': 0.18437401480306762,\n",
       " 'S-ZORB.FT_5101.PV': -0.12739350668324806,\n",
       " 'S-ZORB.TE_9001.PV': -0.07471764249321325,\n",
       " 'S-ZORB.FT_9001.PV': -0.19866778278137767,\n",
       " 'S-ZORB.FT_9403.PV': -0.12539688295469167,\n",
       " 'S-ZORB.PT_9403.PV': 0.11275082657917898,\n",
       " 'S-ZORB.TE_9301.PV': 0.0008551620803599832,\n",
       " 'S-ZORB.FT_9202.PV': -0.05596490645555145,\n",
       " 'S-ZORB.FT_9302.PV': 0.3594070790303029,\n",
       " 'S-ZORB.FT_3301.PV': -0.22952649977940226,\n",
       " 'S-ZORB.FT_9402.PV': -0.1935098800585283,\n",
       " 'S-ZORB.PT_9402.PV': -0.16832904359544615,\n",
       " 'S-ZORB.PT_9401.PV': 0.004860330239177419,\n",
       " 'S-ZORB.PDC_2502.PV': -0.19332570799555493,\n",
       " 'S-ZORB.FC_1005.PV': -0.08667237415511708,\n",
       " 'S-ZORB.FC_1102.PV': 0.271922725896741,\n",
       " 'S-ZORB.TE_1105.PV': -0.22212805929172194,\n",
       " 'S-ZORB.PDI_1102.PV': 0.2760023576785639,\n",
       " 'S-ZORB.TE_1601.PV': 0.14440462189771694,\n",
       " 'S-ZORB.AC_6001.PV': -0.033984385542797806,\n",
       " 'S-ZORB.TE_1608.PV': 0.008843361224234743,\n",
       " 'S-ZORB.PT_6002.PV': -0.19882370472957503,\n",
       " 'S-ZORB.PC_1603.PV': -0.2558884081551451,\n",
       " 'S-ZORB.PT_1602A.PV': 0.20961702763656515,\n",
       " 'S-ZORB.PC_1301.PV': -0.01826604333710109,\n",
       " 'S-ZORB.PT_1201.PV': -0.10687759414849762,\n",
       " 'S-ZORB.TE_1201.PV': -0.21774330346538853,\n",
       " 'S-ZORB.TE_1203.PV': -0.12315432646252776,\n",
       " 'S-ZORB.PC_1202.PV': -0.09486274668360388,\n",
       " 'S-ZORB.TC_2801.PV': -0.31851820602040654,\n",
       " 'S-ZORB.FC_2601.PV': -0.1453865278217371,\n",
       " 'S-ZORB.PDT_2604.PV': -0.15056279269862746,\n",
       " 'S-ZORB.TE_2601.PV': -0.21083135236893283,\n",
       " 'S-ZORB.TC_2607.PV': 0.047860628581770814,\n",
       " 'S-ZORB.PDI_2703A.PV': 0.1498664052513051,\n",
       " 'S-ZORB.PT_1501.PV': 0.2482679651295373,\n",
       " 'S-ZORB.FT_9001.TOTAL': -0.08589788507720619,\n",
       " 'S-ZORB.FT_5201.TOTAL': -0.11263541874520655,\n",
       " 'S-ZORB.FT_5101.TOTAL': -0.08677126583107367,\n",
       " 'S-ZORB.FT_9101.TOTAL': -0.21085010160287512,\n",
       " 'S-ZORB.FT_3301.TOTAL': -0.19254196810154614,\n",
       " 'S-ZORB.FT_9201.TOTAL': -0.1792790204359457,\n",
       " 'S-ZORB.FT_9202.TOTAL': -0.1175526212370883,\n",
       " 'S-ZORB.FT_9301.TOTAL': -0.18439564956112137,\n",
       " 'S-ZORB.FT_9302.TOTAL': -0.10709223022822006,\n",
       " 'S-ZORB.FT_9401.TOTAL': -0.12635214449453291,\n",
       " 'S-ZORB.FT_9402.TOTAL': -0.20623453682516735,\n",
       " 'S-ZORB.FT_9403.TOTAL': -0.17073475039780495,\n",
       " 'S-ZORB.FC_1101.TOTAL': -0.0915213598874793,\n",
       " 'S-ZORB.FT_1204.TOTAL': -0.256314411093851,\n",
       " 'S-ZORB.FT_1001.TOTAL': -0.11245303384283656,\n",
       " 'S-ZORB.TE_1101.DACA': -0.231060371086551,\n",
       " 'S-ZORB.PT_1102.DACA': 0.027383642084600635,\n",
       " 'S-ZORB.PT_1103.DACA': -0.10358118821004211,\n",
       " 'S-ZORB.TE_1106.DACA': -0.2899374840694373,\n",
       " 'S-ZORB.LI_9102.DACA': 0.02067315240942256,\n",
       " 'S-ZORB.TE_9003.DACA': -0.07343428877647445,\n",
       " 'S-ZORB.TE_9002.DACA': -0.12337811883903699,\n",
       " 'S-ZORB.PC_9002.DACA': 0.20998014022870776,\n",
       " 'S-ZORB.LC_5102.DACA': -0.04225199226104218,\n",
       " 'S-ZORB.LT_3801.DACA': 0.30021313683017326,\n",
       " 'S-ZORB.LT_3101.DACA': 0.017834931653882637,\n",
       " 'S-ZORB.PC_3101.DACA': -0.19920224041724977,\n",
       " 'S-ZORB.TE_3101.DACA': -0.11483394263559663,\n",
       " 'S-ZORB.FT_3303.DACA': -0.2272532484948421,\n",
       " 'S-ZORB.TE_1501.DACA': -0.04012691979630782,\n",
       " 'S-ZORB.TE_1502.DACA': -0.05479002416324695,\n",
       " 'S-ZORB.LT_2101.DACA': 0.16253716800500598,\n",
       " 'S-ZORB.FT_2701.DACA': -0.24268110734782442,\n",
       " 'S-ZORB.FC_2702.DACA': 0.2150792949122276,\n",
       " 'S-ZORB.TC_2702.DACA': -0.22191896427339197,\n",
       " 'S-ZORB.LT_2901.DACA': -0.06914233742964841,\n",
       " 'S-ZORB.TE_2901.DACA': -0.12461225021648417,\n",
       " 'S-ZORB.TE_2902.DACA': -0.051158002789832756,\n",
       " 'S-ZORB.TE_2501.DACA': -0.05675188729409778,\n",
       " 'S-ZORB.PT_2501.DACA': -0.19934638277649708,\n",
       " 'S-ZORB.PT_2502.DACA': -0.235346281695466,\n",
       " 'S-ZORB.FT_2433.DACA': -0.16375036349114566,\n",
       " 'S-ZORB.TE_2401.DACA': 0.1305851164225289,\n",
       " 'S-ZORB.SIS_TE_2802': -0.2911863880343874,\n",
       " 'S-ZORB.TE_5002.DACA': -0.20713109707321495,\n",
       " 'S-ZORB.TE_5004.DACA': 0.011384425545082481,\n",
       " 'S-ZORB.TE_5006.DACA': 0.038106456425006964,\n",
       " 'S-ZORB.TE_5003.DACA': 0.015290037099315647,\n",
       " 'S-ZORB.TE_5201.DACA': 0.3013085164385599,\n",
       " 'S-ZORB.TE_5101.DACA': -0.06422961148978228,\n",
       " 'S-ZORB.FT_2431.DACA': 0.04965969068582465,\n",
       " 'S-ZORB.SIS_TE_2606.PV': 0.0550066880302667,\n",
       " 'S-ZORB.SIS_TE_2605.PV': 0.019076062435079873,\n",
       " 'S-ZORB.PDT_2704.DACA': 0.14791119312213435,\n",
       " 'S-ZORB.PDC_2702.DACA': -0.11464086246472908,\n",
       " 'S-ZORB.PT_6009.DACA': -0.01663468284726158,\n",
       " 'S-ZORB.LI_2104.DACA': 0.31951741227729086,\n",
       " 'S-ZORB.TE_6002.DACA': -0.007341891125083413,\n",
       " 'S-ZORB.TE_6001.DACA': 0.08411218291279998,\n",
       " 'S-ZORB.PT_1101.DACA': -0.014067224287991958,\n",
       " 'S-ZORB.FC_5103.DACA': 0.19409826862497612,\n",
       " 'S-ZORB.PDT_3601.DACA': 0.21862512173848606,\n",
       " 'S-ZORB.PT_6006.DACA': 0.08069384896408476,\n",
       " 'S-ZORB.SIS_TE_6009.PV': 0.006526676946606415,\n",
       " 'S-ZORB.SIS_PT_6007.PV': 0.113524708510829,\n",
       " 'S-ZORB.TE_6008.DACA': -0.02271084795069482,\n",
       " 'S-ZORB.PT_5201.DACA': 0.1015108827790286,\n",
       " 'S-ZORB.PC_3501.DACA': 0.2315053023516191,\n",
       " 'S-ZORB.LT_9101.DACA': 0.09067760346108261,\n",
       " 'S-ZORB.PT_6003.DACA': 0.1844337949724357,\n",
       " 'S-ZORB.PDI_2105.DACA': -0.2474395643408618,\n",
       " 'S-ZORB.BS_LT_2401.PV': -0.16256921725187723,\n",
       " 'S-ZORB.FT_3701.DACA': 0.07687774156682424,\n",
       " 'S-ZORB.PT_2603.DACA': -0.19049405779490258,\n",
       " 'S-ZORB.PDT_2606.DACA': 0.08801593099530565,\n",
       " 'S-ZORB.ZT_2634.DACA': -0.24176116314969334,\n",
       " 'S-ZORB.TE_2603.DACA': -0.23800796058080242,\n",
       " 'S-ZORB.TE_2604.DACA': 0.019145153918961356,\n",
       " 'S-ZORB.TE_2104.DACA': 0.17139519831341968,\n",
       " 'S-ZORB.PDT_2001.DACA': -0.20875494529620764,\n",
       " 'S-ZORB.TE_2002.DACA': 0.04081410084264835,\n",
       " 'S-ZORB.TE_2004.DACA': -0.003004105440560897,\n",
       " 'S-ZORB.TE_2003.DACA': 0.04525986590694542,\n",
       " 'S-ZORB.PDT_1003.DACA': -0.2490705725594176,\n",
       " 'S-ZORB.PDT_1002.DACA': 0.20483299620193093,\n",
       " 'S-ZORB.PDT_3503.DACA': 0.20066013807784222,\n",
       " 'S-ZORB.PDT_3502.DACA': -0.036933690708002925,\n",
       " 'S-ZORB.PDT_3002.DACA': 0.10043552951386786,\n",
       " 'S-ZORB.PDT_1004.DACA': 0.01957721956960743,\n",
       " 'S-ZORB.PDI_2903.DACA': -0.2133053124524788,\n",
       " 'S-ZORB.PT_2901.DACA': 0.11408000037259386,\n",
       " 'S-ZORB.PT_2106.DACA': -0.19086765345041604,\n",
       " 'S-ZORB.TE_7508B.DACA': -0.09838948528159644,\n",
       " 'S-ZORB.TE_7506B.DACA': -0.11305635545612554,\n",
       " 'S-ZORB.PT_7505B.DACA': -0.03897641298049327,\n",
       " 'S-ZORB.TE_7504B.DACA': -0.08170234295057675,\n",
       " 'S-ZORB.PT_7503B.DACA': -0.04878407861146759,\n",
       " 'S-ZORB.TE_7502B.DACA': -0.11557685548350823,\n",
       " 'S-ZORB.TE_7106B.DACA': 0.1314296112256297,\n",
       " 'S-ZORB.TE_7108B.DACA': 0.13375677530626698,\n",
       " 'S-ZORB.PT_7107B.DACA': 0.1279808830655149,\n",
       " 'S-ZORB.PT_7103B.DACA': 0.10482509791598138,\n",
       " 'S-ZORB.PT_1604.DACA': 0.022353835285764012,\n",
       " 'S-ZORB.TC_1607.DACA': 0.1427134877765927,\n",
       " 'S-ZORB.PT_6005.DACA': -0.19727305352175578,\n",
       " 'S-ZORB.PT_6008.DACA': -0.279067561812246,\n",
       " 'S-ZORB.PT_1601.DACA': 0.001707831758407191,\n",
       " 'S-ZORB.TE_1605.DACA': 0.08083267829515871,\n",
       " 'S-ZORB.SIS_FT_3202.PV': -0.19052815497049477,\n",
       " 'S-ZORB.TXE_3202A.DACA': 0.0711869792109372,\n",
       " 'S-ZORB.TXE_3201A.DACA': 0.0647766738490287,\n",
       " 'S-ZORB.TXE_2203A.DACA': 0.24671011532281484,\n",
       " 'S-ZORB.TXE_2202A.DACA': 0.24666290171102692,\n",
       " 'S-ZORB.TE_5008.DACA': 0.11288138249148826,\n",
       " 'S-ZORB.TE_5009.DACA': -0.05944036468771802,\n",
       " 'S-ZORB.FC_5001.DACA': 0.2547375900467951,\n",
       " 'S-ZORB.TE_1503.DACA': -0.11329497413517337,\n",
       " 'S-ZORB.AT-0001.DACA.PV': 0.2780548835380038,\n",
       " 'S-ZORB.AT-0002.DACA.PV': 0.277238879638677,\n",
       " 'S-ZORB.AT-0004.DACA.PV': -0.22325088145550206,\n",
       " 'S-ZORB.AT-0005.DACA.PV': 0.22870839207757634,\n",
       " 'S-ZORB.AT-0006.DACA.PV': -0.20752138258071245,\n",
       " 'S-ZORB.AT-0007.DACA.PV': -0.21619796577927594,\n",
       " 'S-ZORB.AT-0008.DACA.PV': -0.22506569468434764,\n",
       " 'S-ZORB.AT-0009.DACA.PV': -0.21641773884696774,\n",
       " 'S-ZORB.AT-0011.DACA.PV': -0.22264900085993714,\n",
       " 'S-ZORB.FT_1204.DACA.PV': 0.11274239598923164,\n",
       " 'S-ZORB.LC_5102.PIDA.PV': -0.04244329822617364,\n",
       " 'S-ZORB.TE_1102.DACA.PV': 0.165267849615711,\n",
       " 'S-ZORB.CAL.LINE.PV': 0.12329788231773357,\n",
       " 'S-ZORB.CAL.CANGLIANG.PV': 0.3195417316313435,\n",
       " 'S-ZORB.CAL.SPEED.PV': -0.3203618025064861,\n",
       " 'S-ZORB.FT_1503.TOTALIZERA.PV': 0.30121588089762213,\n",
       " 'S-ZORB.FT_1504.TOTALIZERA.PV': 0.3061069287124234,\n",
       " 'S-ZORB.PC_1001A.PV': 0.23047504093930693}"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "corr_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rank features by absolute correlation with the target, strongest first.\n",
    "# NOTE: this rebinds corr_list from a dict to a list of (name, corr) pairs.\n",
    "corr_list = sorted(corr_list.items(), key=lambda kv: abs(kv[1]), reverse=True)\n",
    "\n",
    "# Keep the 30 strongest features; index 0 is 'RON损失' itself (corr == 1.0),\n",
    "# so the slice starts at 1.\n",
    "new_feature_names = [name for name, _ in corr_list[1:31]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "30"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(new_feature_names)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "Xtrain,Xtest,Ytrain,Ytest = TTS(data_325[ new_feature_names ], label, test_size=0.3, random_state=111)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#RON_sun_train, RON_sun_test = Xtrain['RON损失'], Xtest['RON损失']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dead code from an earlier iteration: the target column is not among the\n",
    "# selected features, so nothing needs to be dropped here.\n",
    "#new_Xtrain = Xtrain.drop(['RON损失'], axis=1)\n",
    "#new_Xtest = Xtest.drop(['RON损失'], axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense (Dense)                (None, 256)               7936      \n",
      "_________________________________________________________________\n",
      "dense_1 (Dense)              (None, 128)               32896     \n",
      "_________________________________________________________________\n",
      "dense_2 (Dense)              (None, 64)                8256      \n",
      "_________________________________________________________________\n",
      "dense_3 (Dense)              (None, 1)                 65        \n",
      "=================================================================\n",
      "Total params: 49,153\n",
      "Trainable params: 49,153\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "WARNING:tensorflow:Falling back from v2 loop because of error: Failed to find data adapter that can handle input: <class 'pandas.core.frame.DataFrame'>, <class 'NoneType'>\n",
      "Train on 204 samples, validate on 23 samples\n",
      "Epoch 1/150\n",
      "204/204 [==============================] - 2s 8ms/sample - loss: 22.3555 - val_loss: 0.3612\n",
      "Epoch 2/150\n",
      "204/204 [==============================] - 0s 100us/sample - loss: 0.2956 - val_loss: 0.0344\n",
      "Epoch 3/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0845 - val_loss: 0.0170\n",
      "Epoch 4/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0663 - val_loss: 0.0172\n",
      "Epoch 5/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0660 - val_loss: 0.0174\n",
      "Epoch 6/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0659 - val_loss: 0.0210\n",
      "Epoch 7/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0654 - val_loss: 0.0221\n",
      "Epoch 8/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0654 - val_loss: 0.0176\n",
      "Epoch 9/150\n",
      "204/204 [==============================] - 0s 91us/sample - loss: 0.0655 - val_loss: 0.0262\n",
      "Epoch 10/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0662 - val_loss: 0.0224\n",
      "Epoch 11/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0654 - val_loss: 0.0198\n",
      "Epoch 12/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0652 - val_loss: 0.0181\n",
      "Epoch 13/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0654 - val_loss: 0.0203\n",
      "Epoch 14/150\n",
      "204/204 [==============================] - 0s 96us/sample - loss: 0.0651 - val_loss: 0.0219\n",
      "Epoch 15/150\n",
      "204/204 [==============================] - 0s 83us/sample - loss: 0.0653 - val_loss: 0.0215\n",
      "Epoch 16/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0654 - val_loss: 0.0229\n",
      "Epoch 17/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0657 - val_loss: 0.0243\n",
      "Epoch 18/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0657 - val_loss: 0.0191\n",
      "Epoch 19/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0654 - val_loss: 0.0274\n",
      "Epoch 20/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0665 - val_loss: 0.0206\n",
      "Epoch 21/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0656 - val_loss: 0.0169\n",
      "Epoch 22/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0662 - val_loss: 0.0304\n",
      "Epoch 23/150\n",
      "204/204 [==============================] - 0s 83us/sample - loss: 0.0675 - val_loss: 0.0186\n",
      "Epoch 24/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0651 - val_loss: 0.0204\n",
      "Epoch 25/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0656 - val_loss: 0.0205\n",
      "Epoch 26/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0652 - val_loss: 0.0183\n",
      "Epoch 27/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0653 - val_loss: 0.0193\n",
      "Epoch 28/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0654 - val_loss: 0.0195\n",
      "Epoch 29/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0651 - val_loss: 0.0185\n",
      "Epoch 30/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0651 - val_loss: 0.0178\n",
      "Epoch 31/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0654 - val_loss: 0.0184\n",
      "Epoch 32/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0652 - val_loss: 0.0189\n",
      "Epoch 33/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0655 - val_loss: 0.0200\n",
      "Epoch 34/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0651 - val_loss: 0.0237\n",
      "Epoch 35/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0655 - val_loss: 0.0180\n",
      "Epoch 36/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0652 - val_loss: 0.0220\n",
      "Epoch 37/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0656 - val_loss: 0.0193\n",
      "Epoch 38/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0654 - val_loss: 0.0249\n",
      "Epoch 39/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0660 - val_loss: 0.0205\n",
      "Epoch 40/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0650 - val_loss: 0.0235\n",
      "Epoch 41/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0655 - val_loss: 0.0186\n",
      "Epoch 42/150\n",
      "204/204 [==============================] - 0s 103us/sample - loss: 0.0652 - val_loss: 0.0169\n",
      "Epoch 43/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0663 - val_loss: 0.0173\n",
      "Epoch 44/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0657 - val_loss: 0.0176\n",
      "Epoch 45/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0659 - val_loss: 0.0182\n",
      "Epoch 46/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0651 - val_loss: 0.0218\n",
      "Epoch 47/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0655 - val_loss: 0.0258\n",
      "Epoch 48/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0660 - val_loss: 0.0197\n",
      "Epoch 49/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0650 - val_loss: 0.0191\n",
      "Epoch 50/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0651 - val_loss: 0.0176\n",
      "Epoch 51/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0654 - val_loss: 0.0204\n",
      "Epoch 52/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0650 - val_loss: 0.0182\n",
      "Epoch 53/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0651 - val_loss: 0.0186\n",
      "Epoch 54/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0654 - val_loss: 0.0220\n",
      "Epoch 55/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0652 - val_loss: 0.0207\n",
      "Epoch 56/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0651 - val_loss: 0.0239\n",
      "Epoch 57/150\n",
      "204/204 [==============================] - 0s 103us/sample - loss: 0.0656 - val_loss: 0.0197\n",
      "Epoch 58/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0650 - val_loss: 0.0172\n",
      "Epoch 59/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0656 - val_loss: 0.0177\n",
      "Epoch 60/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0657 - val_loss: 0.0185\n",
      "Epoch 61/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0650 - val_loss: 0.0224\n",
      "Epoch 62/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0655 - val_loss: 0.0234\n",
      "Epoch 63/150\n",
      "204/204 [==============================] - 0s 99us/sample - loss: 0.0654 - val_loss: 0.0214\n",
      "Epoch 64/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0651 - val_loss: 0.0235\n",
      "Epoch 65/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0660 - val_loss: 0.0211\n",
      "Epoch 66/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0650 - val_loss: 0.0177\n",
      "Epoch 67/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0652 - val_loss: 0.0187\n",
      "Epoch 68/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0653 - val_loss: 0.0260\n",
      "Epoch 69/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0660 - val_loss: 0.0222\n",
      "Epoch 70/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0655 - val_loss: 0.0172\n",
      "Epoch 71/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0661 - val_loss: 0.0221\n",
      "Epoch 72/150\n",
      "204/204 [==============================] - 0s 83us/sample - loss: 0.0651 - val_loss: 0.0194\n",
      "Epoch 73/150\n",
      "204/204 [==============================] - 0s 92us/sample - loss: 0.0654 - val_loss: 0.0190\n",
      "Epoch 74/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0649 - val_loss: 0.0196\n",
      "Epoch 75/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0649 - val_loss: 0.0201\n",
      "Epoch 76/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0650 - val_loss: 0.0184\n",
      "Epoch 77/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0650 - val_loss: 0.0178\n",
      "Epoch 78/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0654 - val_loss: 0.0231\n",
      "Epoch 79/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0653 - val_loss: 0.0192\n",
      "Epoch 80/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0651 - val_loss: 0.0186\n",
      "Epoch 81/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0649 - val_loss: 0.0226\n",
      "Epoch 82/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0652 - val_loss: 0.0215\n",
      "Epoch 83/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0650 - val_loss: 0.0279\n",
      "Epoch 84/150\n",
      "204/204 [==============================] - 0s 103us/sample - loss: 0.0667 - val_loss: 0.0186\n",
      "Epoch 85/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0653 - val_loss: 0.0177\n",
      "Epoch 86/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0652 - val_loss: 0.0192\n",
      "Epoch 87/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0650 - val_loss: 0.0186\n",
      "Epoch 88/150\n",
      "204/204 [==============================] - 0s 96us/sample - loss: 0.0649 - val_loss: 0.0177\n",
      "Epoch 89/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0652 - val_loss: 0.0211\n",
      "Epoch 90/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0652 - val_loss: 0.0184\n",
      "Epoch 91/150\n",
      "204/204 [==============================] - 0s 83us/sample - loss: 0.0650 - val_loss: 0.0206\n",
      "Epoch 92/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0653 - val_loss: 0.0221\n",
      "Epoch 93/150\n",
      "204/204 [==============================] - 0s 103us/sample - loss: 0.0651 - val_loss: 0.0183\n",
      "Epoch 94/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0651 - val_loss: 0.0203\n",
      "Epoch 95/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0653 - val_loss: 0.0178\n",
      "Epoch 96/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0651 - val_loss: 0.0199\n",
      "Epoch 97/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0648 - val_loss: 0.0186\n",
      "Epoch 98/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0650 - val_loss: 0.0250\n",
      "Epoch 99/150\n",
      "204/204 [==============================] - 0s 103us/sample - loss: 0.0657 - val_loss: 0.0228\n",
      "Epoch 100/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0653 - val_loss: 0.0221\n",
      "Epoch 101/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0652 - val_loss: 0.0226\n",
      "Epoch 102/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0652 - val_loss: 0.0199\n",
      "Epoch 103/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0649 - val_loss: 0.0194\n",
      "Epoch 104/150\n",
      "204/204 [==============================] - 0s 83us/sample - loss: 0.0648 - val_loss: 0.0179\n",
      "Epoch 105/150\n",
      "204/204 [==============================] - 0s 103us/sample - loss: 0.0654 - val_loss: 0.0237\n",
      "Epoch 106/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0655 - val_loss: 0.0189\n",
      "Epoch 107/150\n",
      "204/204 [==============================] - 0s 103us/sample - loss: 0.0650 - val_loss: 0.0188\n",
      "Epoch 108/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0648 - val_loss: 0.0192\n",
      "Epoch 109/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0649 - val_loss: 0.0209\n",
      "Epoch 110/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0650 - val_loss: 0.0197\n",
      "Epoch 111/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0648 - val_loss: 0.0181\n",
      "Epoch 112/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0650 - val_loss: 0.0205\n",
      "Epoch 113/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0648 - val_loss: 0.0174\n",
      "Epoch 114/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0653 - val_loss: 0.0191\n",
      "Epoch 115/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0649 - val_loss: 0.0187\n",
      "Epoch 116/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0648 - val_loss: 0.0191\n",
      "Epoch 117/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0647 - val_loss: 0.0216\n",
      "Epoch 118/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0650 - val_loss: 0.0181\n",
      "Epoch 119/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0648 - val_loss: 0.0234\n",
      "Epoch 120/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0652 - val_loss: 0.0233\n",
      "Epoch 121/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0654 - val_loss: 0.0200\n",
      "Epoch 122/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0648 - val_loss: 0.0225\n",
      "Epoch 123/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0652 - val_loss: 0.0214\n",
      "Epoch 124/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0652 - val_loss: 0.0201\n",
      "Epoch 125/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0648 - val_loss: 0.0197\n",
      "Epoch 126/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0647 - val_loss: 0.0180\n",
      "Epoch 127/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0648 - val_loss: 0.0219\n",
      "Epoch 128/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0650 - val_loss: 0.0215\n",
      "Epoch 129/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0649 - val_loss: 0.0182\n",
      "Epoch 130/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0649 - val_loss: 0.0170\n",
      "Epoch 131/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0657 - val_loss: 0.0199\n",
      "Epoch 132/150\n",
      "204/204 [==============================] - 0s 83us/sample - loss: 0.0648 - val_loss: 0.0197\n",
      "Epoch 133/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0650 - val_loss: 0.0237\n",
      "Epoch 134/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0652 - val_loss: 0.0225\n",
      "Epoch 135/150\n",
      "204/204 [==============================] - 0s 103us/sample - loss: 0.0650 - val_loss: 0.0184\n",
      "Epoch 136/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0647 - val_loss: 0.0188\n",
      "Epoch 137/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0648 - val_loss: 0.0223\n",
      "Epoch 138/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0652 - val_loss: 0.0169\n",
      "Epoch 139/150\n",
      "204/204 [==============================] - 0s 103us/sample - loss: 0.0657 - val_loss: 0.0192\n",
      "Epoch 140/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0649 - val_loss: 0.0270\n",
      "Epoch 141/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0663 - val_loss: 0.0204\n",
      "Epoch 142/150\n",
      "204/204 [==============================] - 0s 98us/sample - loss: 0.0650 - val_loss: 0.0174\n",
      "Epoch 143/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0653 - val_loss: 0.0174\n",
      "Epoch 144/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0657 - val_loss: 0.0171\n",
      "Epoch 145/150\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0653 - val_loss: 0.0191\n",
      "Epoch 146/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0647 - val_loss: 0.0203\n",
      "Epoch 147/150\n",
      "204/204 [==============================] - 0s 93us/sample - loss: 0.0649 - val_loss: 0.0222\n",
      "Epoch 148/150\n",
      "204/204 [==============================] - 0s 96us/sample - loss: 0.0648 - val_loss: 0.0173\n",
      "Epoch 149/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0651 - val_loss: 0.0203\n",
      "Epoch 150/150\n",
      "204/204 [==============================] - 0s 88us/sample - loss: 0.0648 - val_loss: 0.0213\n",
      "WARNING:tensorflow:Falling back from v2 loop because of error: Failed to find data adapter that can handle input: <class 'pandas.core.frame.DataFrame'>, <class 'NoneType'>\n",
      "[[1.2769077]\n",
      " [1.2619174]\n",
      " [1.2784818]\n",
      " [1.2769077]\n",
      " [1.2755202]\n",
      " [1.2784818]\n",
      " [1.276618 ]\n",
      " [1.2779009]\n",
      " [1.2778742]\n",
      " [1.2769617]\n",
      " [1.2759404]\n",
      " [1.2542888]\n",
      " [1.2779621]\n",
      " [1.2784818]\n",
      " [1.2637146]\n",
      " [1.2769617]\n",
      " [1.2662346]\n",
      " [1.2784818]\n",
      " [1.2779009]\n",
      " [1.2589384]\n",
      " [1.2762325]\n",
      " [1.2769077]\n",
      " [1.2778742]\n",
      " [1.2765144]\n",
      " [1.2784818]\n",
      " [1.2597523]\n",
      " [1.2784818]\n",
      " [1.2778742]\n",
      " [1.2784818]\n",
      " [1.276618 ]\n",
      " [1.2787808]\n",
      " [1.2769077]\n",
      " [1.2784818]\n",
      " [1.273045 ]\n",
      " [1.276618 ]\n",
      " [1.2778742]\n",
      " [1.2771374]\n",
      " [1.2779621]\n",
      " [1.2772065]\n",
      " [1.263962 ]\n",
      " [1.2730335]\n",
      " [1.272731 ]\n",
      " [1.2759404]\n",
      " [1.2784818]\n",
      " [1.2779009]\n",
      " [1.2762325]\n",
      " [1.2637198]\n",
      " [1.2638533]\n",
      " [1.2763867]\n",
      " [1.2769077]\n",
      " [1.2769077]\n",
      " [1.2575563]\n",
      " [1.2769077]\n",
      " [1.2784818]\n",
      " [1.2779621]\n",
      " [1.2771374]\n",
      " [1.2545466]\n",
      " [1.2784818]\n",
      " [1.2729805]\n",
      " [1.2529457]\n",
      " [1.2783942]\n",
      " [1.2784818]\n",
      " [1.2765144]\n",
      " [1.2763923]\n",
      " [1.276143 ]\n",
      " [1.2687176]\n",
      " [1.2778742]\n",
      " [1.2765617]\n",
      " [1.2771374]\n",
      " [1.2762754]\n",
      " [1.2753402]\n",
      " [1.2762325]\n",
      " [1.2617642]\n",
      " [1.2787808]\n",
      " [1.2737579]\n",
      " [1.2784818]\n",
      " [1.2783942]\n",
      " [1.2778742]\n",
      " [1.2702744]\n",
      " [1.2617209]\n",
      " [1.2759404]\n",
      " [1.2784818]\n",
      " [1.2759404]\n",
      " [1.2783942]\n",
      " [1.2784818]\n",
      " [1.2734469]\n",
      " [1.2779009]\n",
      " [1.2778742]\n",
      " [1.2769077]\n",
      " [1.2779621]\n",
      " [1.2787808]\n",
      " [1.2483029]\n",
      " [1.2730335]\n",
      " [1.2765144]\n",
      " [1.2755992]\n",
      " [1.2692144]\n",
      " [1.2755202]\n",
      " [1.2759404]]\n"
     ]
    }
   ],
   "source": [
    "# Feed-forward regression network: 30 engineered features -> predicted RON loss.\n",
    "model = keras.Sequential([\n",
    "    layers.Dense(256, activation='sigmoid', input_shape=(30,)),\n",
    "    layers.Dense(128, activation='sigmoid'),\n",
    "    layers.Dense(64, activation='sigmoid'),\n",
    "    layers.Dense(1)  # single linear output unit for regression\n",
    "])\n",
    "\n",
    "# Plain SGD (learning rate 0.1) minimising mean squared error.\n",
    "model.compile(optimizer=keras.optimizers.SGD(0.1), loss='mean_squared_error')\n",
    "\n",
    "model.summary()\n",
    "\n",
    "# Xtrain/Ytrain/Xtest come from an earlier split cell (not shown here);\n",
    "# 10% of the training rows are held out for validation during fitting.\n",
    "model.fit(Xtrain, Ytrain, batch_size=50, epochs=150, validation_split=0.1, verbose=1)\n",
    "\n",
    "result = model.predict(Xtest)\n",
    "\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "pandas.core.series.Series"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "type(Ytest)  # sanity check: Ytest is still a pandas Series at this point"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "98"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(result)  # number of test-set predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "result = result.reshape(1, -1)  # -1 infers the sample count instead of hard-coding 98"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "result = result[0]  # drop the leading singleton axis -> 1-D array of predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "result = np.array(result)  # ensure a plain ndarray for the sklearn metrics below"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "Ytest = np.array(Ytest)  # convert the Series to an ndarray to match result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([1.38, 1.25, 1.2 , 1.7 , 1.  , 1.3 , 0.94, 1.  , 1.42, 1.61, 1.21,\n",
       "       1.15, 1.2 , 1.2 , 1.15, 1.11, 1.28, 1.  , 1.6 , 1.08, 1.41, 1.1 ,\n",
       "       1.21, 1.32, 1.38, 1.24, 1.2 , 1.5 , 1.28, 1.34, 1.2 , 1.38, 1.4 ,\n",
       "       1.24, 1.51, 1.44, 1.41, 1.24, 1.21, 1.15, 1.22, 1.24, 1.11, 1.48,\n",
       "       1.24, 1.21, 1.08, 1.08, 1.51, 1.28, 1.5 , 1.4 , 1.2 , 1.58, 1.21,\n",
       "       1.41, 1.04, 1.3 , 1.28, 1.15, 1.32, 1.3 , 1.42, 1.22, 1.12, 1.05,\n",
       "       1.1 , 1.22, 1.51, 1.2 , 1.1 , 1.41, 1.34, 1.4 , 1.2 , 1.48, 1.02,\n",
       "       1.1 , 1.34, 1.05, 1.01, 1.4 , 0.71, 1.42, 1.38, 1.2 , 1.41, 1.3 ,\n",
       "       1.3 , 1.4 , 1.1 , 1.14, 1.21, 1.02, 1.2 , 1.31, 1.1 , 1.41])"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Ytest  # inspect the ground-truth RON-loss targets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.027542462915549317 0.1659592206403408 0.13402094763152483 0.011259246286637548\n"
     ]
    }
   ],
   "source": [
    "# Score the network's predictions against the held-out targets with\n",
    "# the standard regression metrics imported in the first cell.\n",
    "mse_score = MSE(Ytest,result)  # mean squared error\n",
    "rmse_score = np.sqrt(mse_score)  # root mean squared error\n",
    "mae_score = mean_absolute_error(Ytest,result)  # mean absolute error\n",
    "rr_score = r2_score(Ytest,result)  # coefficient of determination (R^2)\n",
    "\n",
    "print(mse_score, rmse_score, mae_score, rr_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
