{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pandas as pd \n", "from datetime import datetime \n", "from datetime import date\n", "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", "import numpy as np\n", "import pandas as pd\n", "from keras.models import Sequential\n", "from keras.layers import LSTM, Dense\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.preprocessing import MinMaxScaler\n", "from keras.callbacks import ModelCheckpoint\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " 
\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datezone_047_hw_valvertu_004_sat_sp_tnzone_047_tempzone_047_fan_spdrtu_004_fltrd_sa_flow_tnrtu_004_sa_temprtu_004_pa_static_stpt_tnrtu_004_oa_flow_tnrtu_004_oadmpr_pct...zone_047_heating_spUnnamed: 47_yhvac_Shp_hws_temparu_001_cwr_temparu_001_cws_fr_gpmaru_001_cws_temparu_001_hwr_temparu_001_hws_fr_gpmaru_001_hws_temp
02018-01-01 00:00:00100.069.067.520.09265.60466.10.060.00000028.0...NaNNaNNaN75.3NaNNaNNaNNaNNaNNaN
12018-01-01 00:01:00100.069.067.520.09265.60466.00.066572.09916228.0...NaNNaNNaN75.3NaNNaNNaNNaNNaNNaN
22018-01-01 00:02:00100.069.067.520.09708.24066.10.067628.83254228.0...NaNNaNNaN75.3NaNNaNNaNNaNNaNNaN
32018-01-01 00:03:00100.069.067.520.09611.63866.10.067710.29461728.0...NaNNaNNaN75.3NaNNaNNaNNaNNaNNaN
42018-01-01 00:04:00100.069.067.520.09215.11066.00.067139.18409028.0...NaNNaNNaN75.3NaNNaNNaNNaNNaNNaN
..................................................................
20721492020-12-31 23:58:00100.068.063.220.018884.83464.40.062938.32000023.4...71.069.023.145000123.856.2554.7156.4123.4261.6122.36
20721502020-12-31 23:58:00100.068.063.220.018884.83464.40.062938.32000023.4...71.069.023.145000123.856.2554.7156.4123.4261.6122.36
20721512020-12-31 23:59:00100.068.063.220.019345.50864.30.063154.39000023.4...71.069.023.145000123.856.2554.7156.4123.4261.6122.36
20721522020-12-31 23:59:00100.068.063.220.019345.50864.30.063154.39000023.4...71.069.023.145000123.856.2554.7156.4123.4261.6122.36
20721532021-01-01 00:00:00100.068.063.220.018650.23264.10.063076.27000022.9...71.069.023.788947123.856.2554.7156.4123.4261.6122.36
\n", "

2072154 rows × 30 columns

\n", "
" ], "text/plain": [ " date zone_047_hw_valve rtu_004_sat_sp_tn \\\n", "0 2018-01-01 00:00:00 100.0 69.0 \n", "1 2018-01-01 00:01:00 100.0 69.0 \n", "2 2018-01-01 00:02:00 100.0 69.0 \n", "3 2018-01-01 00:03:00 100.0 69.0 \n", "4 2018-01-01 00:04:00 100.0 69.0 \n", "... ... ... ... \n", "2072149 2020-12-31 23:58:00 100.0 68.0 \n", "2072150 2020-12-31 23:58:00 100.0 68.0 \n", "2072151 2020-12-31 23:59:00 100.0 68.0 \n", "2072152 2020-12-31 23:59:00 100.0 68.0 \n", "2072153 2021-01-01 00:00:00 100.0 68.0 \n", "\n", " zone_047_temp zone_047_fan_spd rtu_004_fltrd_sa_flow_tn \\\n", "0 67.5 20.0 9265.604 \n", "1 67.5 20.0 9265.604 \n", "2 67.5 20.0 9708.240 \n", "3 67.5 20.0 9611.638 \n", "4 67.5 20.0 9215.110 \n", "... ... ... ... \n", "2072149 63.2 20.0 18884.834 \n", "2072150 63.2 20.0 18884.834 \n", "2072151 63.2 20.0 19345.508 \n", "2072152 63.2 20.0 19345.508 \n", "2072153 63.2 20.0 18650.232 \n", "\n", " rtu_004_sa_temp rtu_004_pa_static_stpt_tn rtu_004_oa_flow_tn \\\n", "0 66.1 0.06 0.000000 \n", "1 66.0 0.06 6572.099162 \n", "2 66.1 0.06 7628.832542 \n", "3 66.1 0.06 7710.294617 \n", "4 66.0 0.06 7139.184090 \n", "... ... ... ... \n", "2072149 64.4 0.06 2938.320000 \n", "2072150 64.4 0.06 2938.320000 \n", "2072151 64.3 0.06 3154.390000 \n", "2072152 64.3 0.06 3154.390000 \n", "2072153 64.1 0.06 3076.270000 \n", "\n", " rtu_004_oadmpr_pct ... zone_047_heating_sp Unnamed: 47_y \\\n", "0 28.0 ... NaN NaN \n", "1 28.0 ... NaN NaN \n", "2 28.0 ... NaN NaN \n", "3 28.0 ... NaN NaN \n", "4 28.0 ... NaN NaN \n", "... ... ... ... ... \n", "2072149 23.4 ... 71.0 69.0 \n", "2072150 23.4 ... 71.0 69.0 \n", "2072151 23.4 ... 71.0 69.0 \n", "2072152 23.4 ... 71.0 69.0 \n", "2072153 22.9 ... 71.0 69.0 \n", "\n", " hvac_S hp_hws_temp aru_001_cwr_temp aru_001_cws_fr_gpm \\\n", "0 NaN 75.3 NaN NaN \n", "1 NaN 75.3 NaN NaN \n", "2 NaN 75.3 NaN NaN \n", "3 NaN 75.3 NaN NaN \n", "4 NaN 75.3 NaN NaN \n", "... ... ... ... ... 
\n", "2072149 23.145000 123.8 56.25 54.71 \n", "2072150 23.145000 123.8 56.25 54.71 \n", "2072151 23.145000 123.8 56.25 54.71 \n", "2072152 23.145000 123.8 56.25 54.71 \n", "2072153 23.788947 123.8 56.25 54.71 \n", "\n", " aru_001_cws_temp aru_001_hwr_temp aru_001_hws_fr_gpm \\\n", "0 NaN NaN NaN \n", "1 NaN NaN NaN \n", "2 NaN NaN NaN \n", "3 NaN NaN NaN \n", "4 NaN NaN NaN \n", "... ... ... ... \n", "2072149 56.4 123.42 61.6 \n", "2072150 56.4 123.42 61.6 \n", "2072151 56.4 123.42 61.6 \n", "2072152 56.4 123.42 61.6 \n", "2072153 56.4 123.42 61.6 \n", "\n", " aru_001_hws_temp \n", "0 NaN \n", "1 NaN \n", "2 NaN \n", "3 NaN \n", "4 NaN \n", "... ... \n", "2072149 122.36 \n", "2072150 122.36 \n", "2072151 122.36 \n", "2072152 122.36 \n", "2072153 122.36 \n", "\n", "[2072154 rows x 30 columns]" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "merged = pd.read_csv(r'C:\\Users\\jerin\\Downloads\\lbnlbldg59\\lbnlbldg59\\lbnlbldg59.processed\\LBNLBLDG59\\clean_Bldg59_2018to2020\\clean data\\long_merge.csv')\n", "\n", "zone = \"47\"\n", "\n", "if zone in [\"36\", \"37\", \"38\", \"39\", \"40\", \"41\", \"42\", \"64\", \"65\", \"66\", \"67\", \"68\", \"69\", \"70\"]:\n", " rtu = \"rtu_001\"\n", " wing = \"hvac_N\"\n", "elif zone in [\"18\", \"25\", \"26\", \"45\", \"48\", \"55\", \"56\", \"61\"]:\n", " rtu = \"rtu_003\"\n", " wing = \"hvac_S\"\n", "elif zone in [\"16\", \"17\", \"21\", \"22\", \"23\", \"24\", \"46\", \"47\", \"51\", \"52\", \"53\", \"54\"]:\n", " rtu = \"rtu_004\"\n", " wing = \"hvac_S\"\n", "else:\n", " rtu = \"rtu_002\"\n", " wing = \"hvac_N\"\n", "#merged is the dataframe\n", "sorted = merged[[\"date\"]+[col for col in merged.columns if zone in col or rtu in col or wing in col]+[\"hp_hws_temp\", \"aru_001_cwr_temp\" , \"aru_001_cws_fr_gpm\" ,\"aru_001_cws_temp\",\"aru_001_hwr_temp\" ,\"aru_001_hws_fr_gpm\" ,\"aru_001_hws_temp\"]]\n", "sorted" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, 
"outputs": [ { "data": { "text/plain": [ "date                                         0\n", "zone_047_hw_valve                            0\n", "rtu_004_sat_sp_tn                            0\n", "zone_047_temp                                0\n", "zone_047_fan_spd                             0\n", "rtu_004_fltrd_sa_flow_tn                     0\n", "rtu_004_sa_temp                              0\n", "rtu_004_pa_static_stpt_tn                    0\n", "rtu_004_oa_flow_tn                           0\n", "rtu_004_oadmpr_pct                           0\n", "rtu_004_econ_stpt_tn                         0\n", "rtu_004_ra_temp                              0\n", "rtu_004_oa_temp                              0\n", "rtu_004_ma_temp                              0\n", "rtu_004_sf_vfd_spd_fbk_tn                    0\n", "rtu_004_rf_vfd_spd_fbk_tn                    0\n", "rtu_004_fltrd_gnd_lvl_plenum_press_tn        0\n", "rtu_004_fltrd_lvl2_plenum_press_tn           0\n", "zone_047_cooling_sp                          0\n", "Unnamed: 47_x                           394570\n", "zone_047_heating_sp                          0\n", "Unnamed: 47_y                           394570\n", "hvac_S                                   13035\n", "hp_hws_temp                                  0\n", "aru_001_cwr_temp                        524350\n", "aru_001_cws_fr_gpm                      524350\n", "aru_001_cws_temp                        524350\n", "aru_001_hwr_temp                        299165\n", "aru_001_hws_fr_gpm                      299165\n", "aru_001_hws_temp                        299165\n", "dtype: int64" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "final_df = sorted.copy()\n", "final_df['date'] = pd.to_datetime(final_df['date'], format = \"%Y-%m-%d %H:%M:%S\")\n", "final_df = final_df[ (final_df.date.dt.date >date(2019, 4, 1)) & (final_df.date.dt.date< date(2020, 2, 15))]\n", "final_df.isna().sum()" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "testdataset_df = final_df[(final_df.date.dt.date <date(2019, 11, 8))]\n", "traindataset_df = final_df[(final_df.date.dt.date > date(2019, 11, 8))]\n", "\n", "testdataset = testdataset_df[['rtu_004_oa_temp','rtu_004_ra_temp','hp_hws_temp','rtu_004_oa_flow_tn','rtu_004_oadmpr_pct',\n", "                              'rtu_004_sat_sp_tn','rtu_004_rf_vfd_spd_fbk_tn','rtu_004_ma_temp','rtu_004_sa_temp','rtu_004_fltrd_sa_flow_tn',\n", "                              'rtu_004_sf_vfd_spd_fbk_tn']].values\n", "\n", "\n", "traindataset = traindataset_df[['rtu_004_oa_temp','rtu_004_ra_temp','hp_hws_temp','rtu_004_oa_flow_tn','rtu_004_oadmpr_pct',\n", "                              'rtu_004_sat_sp_tn','rtu_004_rf_vfd_spd_fbk_tn','rtu_004_ma_temp','rtu_004_sa_temp','rtu_004_fltrd_sa_flow_tn',\n", "                              'rtu_004_sf_vfd_spd_fbk_tn']].values" ] }, { 
"cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "traindataset = traindataset.astype('float32')\n", "testdataset = testdataset.astype('float32')\n", "\n", "\n", "scaler = MinMaxScaler(feature_range=(0, 1))\n", "traindataset = scaler.fit_transform(traindataset)\n", "testdataset = scaler.transform(testdataset)" ] }, { "cell_type": "code", "execution_count": 47, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\jerin\\anaconda3\\envs\\smartbuilding\\Lib\\site-packages\\keras\\src\\layers\\rnn\\rnn.py:205: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n", " super().__init__(**kwargs)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/10\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 0.0071\n", "Epoch 1: val_loss improved from inf to 0.01145, saving model to lstm2.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m77s\u001b[0m 23ms/step - loss: 0.0071 - val_loss: 0.0115\n", "Epoch 2/10\n", "\u001b[1m3217/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 0.0013\n", "Epoch 2: val_loss improved from 0.01145 to 0.01144, saving model to lstm2.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m73s\u001b[0m 23ms/step - loss: 0.0013 - val_loss: 0.0114\n", "Epoch 3/10\n", "\u001b[1m3218/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 0.0010\n", "Epoch 3: val_loss improved from 0.01144 to 0.00729, saving model to lstm2.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m 
\u001b[1m71s\u001b[0m 22ms/step - loss: 0.0010 - val_loss: 0.0073\n", "Epoch 4/10\n", "\u001b[1m3218/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 5.5876e-04\n", "Epoch 4: val_loss improved from 0.00729 to 0.00409, saving model to lstm2.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m73s\u001b[0m 23ms/step - loss: 5.5871e-04 - val_loss: 0.0041\n", "Epoch 5/10\n", "\u001b[1m3218/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 3.9261e-04\n", "Epoch 5: val_loss improved from 0.00409 to 0.00386, saving model to lstm2.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m72s\u001b[0m 22ms/step - loss: 3.9260e-04 - val_loss: 0.0039\n", "Epoch 6/10\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 3.3977e-04\n", "Epoch 6: val_loss did not improve from 0.00386\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m70s\u001b[0m 22ms/step - loss: 3.3976e-04 - val_loss: 0.0049\n", "Epoch 7/10\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 3.0365e-04\n", "Epoch 7: val_loss did not improve from 0.00386\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m69s\u001b[0m 22ms/step - loss: 3.0364e-04 - val_loss: 0.0052\n", "Epoch 8/10\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 2.7422e-04\n", "Epoch 8: val_loss did not improve from 0.00386\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m70s\u001b[0m 22ms/step - loss: 2.7422e-04 - val_loss: 0.0052\n", 
"Epoch 9/10\n", "\u001b[1m3217/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 2.5380e-04\n", "Epoch 9: val_loss did not improve from 0.00386\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m73s\u001b[0m 23ms/step - loss: 2.5379e-04 - val_loss: 0.0058\n", "Epoch 10/10\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 2.3404e-04\n", "Epoch 10: val_loss did not improve from 0.00386\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m72s\u001b[0m 22ms/step - loss: 2.3403e-04 - val_loss: 0.0099\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 47, "metadata": {}, "output_type": "execute_result" } ], "source": [ "train,test = traindataset,testdataset\n", "\n", "def create_dataset(dataset,time_step):\n", " x1,x2,x3,x4,x5,x6,x7,x8,x9,Y = [],[],[],[],[],[],[],[],[],[]\n", " for i in range(len(dataset)-time_step-1):\n", " x1.append(dataset[i:(i+time_step), 0])\n", " x2.append(dataset[i:(i+time_step), 1])\n", " x3.append(dataset[i:(i+time_step), 2])\n", " x4.append(dataset[i:(i+time_step), 3])\n", " x5.append(dataset[i:(i+time_step), 4])\n", " x6.append(dataset[i:(i+time_step), 5])\n", " x7.append(dataset[i:(i+time_step), 6])\n", " x8.append(dataset[i:(i+time_step), 7])\n", " # x9.append(dataset[i:(i+time_step), 8])\n", " Y.append([dataset[i + time_step, 7]])\n", " x1,x2,x3,x4,x5,x6,x7,x8 = np.array(x1),np.array(x2),np.array(x3), np.array(x4),np.array(x5),np.array(x6),np.array(x7),np.array(x8)#,np.array(x9)\n", " Y = np.reshape(Y,(len(Y),1))\n", " return np.stack([x1,x2,x3,x4,x5,x6,x7,x8],axis=2),Y\n", "\n", "\n", "\n", "\n", "time_step = 30\n", "X_train, y_train = create_dataset(train, time_step)\n", "X_test, y_test = create_dataset(test, time_step)\n", "\n", "\n", "model = Sequential()\n", "model.add(LSTM(units=50, 
return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))\n", "model.add(LSTM(units=50, return_sequences=True))\n", "model.add(LSTM(units=30))\n", "model.add(Dense(units=1))\n", "\n", "model.compile(optimizer='adam', loss='mean_squared_error')\n", "\n", "checkpoint_path = \"lstm2.keras\"\n", "checkpoint_callback = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n", "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64, verbose=1, callbacks=[checkpoint_callback])\n" ] }, { "cell_type": "code", "execution_count": 45, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/5\n", "\u001b[1m3218/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 19ms/step - loss: 1.8977e-04\n", "Epoch 1: val_loss improved from inf to 0.01131, saving model to lstm2.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m94s\u001b[0m 29ms/step - loss: 1.8977e-04 - val_loss: 0.0113\n", "Epoch 2/5\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 18ms/step - loss: 1.7357e-04\n", "Epoch 2: val_loss did not improve from 0.01131\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m91s\u001b[0m 28ms/step - loss: 1.7358e-04 - val_loss: 0.0123\n", "Epoch 3/5\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 18ms/step - loss: 1.6701e-04\n", "Epoch 3: val_loss did not improve from 0.01131\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m92s\u001b[0m 28ms/step - loss: 1.6701e-04 - val_loss: 0.0127\n", "Epoch 4/5\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 18ms/step - loss: 
1.7043e-04\n", "Epoch 4: val_loss did not improve from 0.01131\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m91s\u001b[0m 28ms/step - loss: 1.7043e-04 - val_loss: 0.0131\n", "Epoch 5/5\n", "\u001b[1m3218/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 19ms/step - loss: 1.6319e-04\n", "Epoch 5: val_loss did not improve from 0.01131\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m104s\u001b[0m 32ms/step - loss: 1.6319e-04 - val_loss: 0.0134\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 45, "metadata": {}, "output_type": "execute_result" } ], "source": [ "checkpoint_callback = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n", "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=5, batch_size=64, verbose=1, callbacks=[checkpoint_callback])" ] }, { "cell_type": "code", "execution_count": 48, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[1m9900/9900\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m34s\u001b[0m 3ms/step\n" ] } ], "source": [ "# train_predict = model.predict(X_train)\n", "test_predict = model.predict(X_test)" ] }, { "cell_type": "code", "execution_count": 49, "metadata": {}, "outputs": [], "source": [ "%matplotlib qt\n", "#'rtu_004_ma_temp','rtu_004_sa_temp'\n", "var = 0\n", "plt.plot(testdataset_df['date'][31:],y_test, label='Original Testing Data', color='blue')\n", "plt.plot(testdataset_df['date'][31:],test_predict, label='Predicted Testing Data', color='red',alpha=0.8)\n", "# anomalies = np.where(abs(test_predict[:,var] - y_test[:,var]) > 0.38)[0]\n", "# plt.scatter(anomalies,test_predict[anomalies,var], color='black',marker =\"o\",s=100 )\n", "\n", "\n", "plt.title('Testing Data - Predicted vs Actual')\n", "plt.xlabel('Time')\n", 
"plt.ylabel('Value')\n", "plt.legend()\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": 50, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`. \n" ] } ], "source": [ "from tensorflow.keras.models import load_model\n", "# model.save(\"MA_temp_model.h5\") \n", "# loaded_model = load_model(\"MA_temp_model.h5\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "ENERGY DATA" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datertu_001_sat_sp_tnrtu_002_sat_sp_tnrtu_003_sat_sp_tnrtu_004_sat_sp_tnrtu_001_fltrd_sa_flow_tnrtu_002_fltrd_sa_flow_tnrtu_003_fltrd_sa_flow_tnrtu_004_fltrd_sa_flow_tnrtu_001_sa_temp...rtu_001_fltrd_gnd_lvl_plenum_press_tnrtu_002_fltrd_gnd_lvl_plenum_press_tnrtu_003_fltrd_gnd_lvl_plenum_press_tnrtu_004_fltrd_gnd_lvl_plenum_press_tnrtu_001_fltrd_lvl2_plenum_press_tnrtu_002_fltrd_lvl2_plenum_press_tnrtu_003_fltrd_lvl2_plenum_press_tnrtu_004_fltrd_lvl2_plenum_press_tnhvac_Nhvac_S
02018-01-01 00:00:0068.070.065.069.014131.44913998.75713558.5399265.60467.6...0.0300.040.040.0470.0500.050.050.050NaNNaN
12018-01-01 00:01:0068.070.065.069.014164.42914065.25913592.9099265.60467.6...0.0310.040.040.0430.0480.050.040.046NaNNaN
\n", "

2 rows × 59 columns

\n", "
" ], "text/plain": [ " date rtu_001_sat_sp_tn rtu_002_sat_sp_tn \\\n", "0 2018-01-01 00:00:00 68.0 70.0 \n", "1 2018-01-01 00:01:00 68.0 70.0 \n", "\n", " rtu_003_sat_sp_tn rtu_004_sat_sp_tn rtu_001_fltrd_sa_flow_tn \\\n", "0 65.0 69.0 14131.449 \n", "1 65.0 69.0 14164.429 \n", "\n", " rtu_002_fltrd_sa_flow_tn rtu_003_fltrd_sa_flow_tn \\\n", "0 13998.757 13558.539 \n", "1 14065.259 13592.909 \n", "\n", " rtu_004_fltrd_sa_flow_tn rtu_001_sa_temp ... \\\n", "0 9265.604 67.6 ... \n", "1 9265.604 67.6 ... \n", "\n", " rtu_001_fltrd_gnd_lvl_plenum_press_tn \\\n", "0 0.030 \n", "1 0.031 \n", "\n", " rtu_002_fltrd_gnd_lvl_plenum_press_tn \\\n", "0 0.04 \n", "1 0.04 \n", "\n", " rtu_003_fltrd_gnd_lvl_plenum_press_tn \\\n", "0 0.04 \n", "1 0.04 \n", "\n", " rtu_004_fltrd_gnd_lvl_plenum_press_tn rtu_001_fltrd_lvl2_plenum_press_tn \\\n", "0 0.047 0.050 \n", "1 0.043 0.048 \n", "\n", " rtu_002_fltrd_lvl2_plenum_press_tn rtu_003_fltrd_lvl2_plenum_press_tn \\\n", "0 0.05 0.05 \n", "1 0.05 0.04 \n", "\n", " rtu_004_fltrd_lvl2_plenum_press_tn hvac_N hvac_S \n", "0 0.050 NaN NaN \n", "1 0.046 NaN NaN \n", "\n", "[2 rows x 59 columns]" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "zone = [\"18\", \"25\", \"26\", \"45\", \"48\", \"55\", \"56\", \"61\",\"16\", \"17\", \"21\", \"23\", \"24\", \"46\", \"47\", \"51\", \"52\", \"53\", \"54\"]\n", "rtu = [\"rtu_001\",\"rtu_002\",\"rtu_003\",\"rtu_004\"]\n", "wing = [\"hvac_N\",\"hvac_S\"]\n", "# any(sub in col for sub in zone) or\n", "energy_data = merged[[\"date\"]+[col for col in merged.columns if any(sub in col for sub in wing) or any(sub in col for sub in rtu)]]\n", "df_filtered = energy_data[[col for col in energy_data.columns if 'Unnamed' not in col]]\n", "df_filtered = df_filtered[[col for col in df_filtered.columns if 'co2' not in col]]\n", "df_filtered = df_filtered[[col for col in df_filtered.columns if 'templogger' not in col]]\n", "# df_filtered = df_filtered.dropna()\n", 
"df_filtered.head(2)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "There are NA values in the DataFrame columns.\n" ] } ], "source": [ "df_filtered['date'] = pd.to_datetime(df_filtered['date'], format = \"%Y-%m-%d %H:%M:%S\")\n", "df_filtered = df_filtered[ (df_filtered.date.dt.date >date(2019, 4, 1)) & (df_filtered.date.dt.date< date(2020, 2, 15))]\n", "# df_filtered.isna().sum()\n", "if df_filtered.isna().any().any():\n", "    print(\"There are NA values in the DataFrame columns.\")" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[]" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "testdataset_df = df_filtered[(df_filtered.date.dt.date <date(2019, 11, 8))]\n", "traindataset_df = df_filtered[(df_filtered.date.dt.date > date(2019, 11, 8))]\n", "\n", "testdataset = testdataset_df.drop(columns=[\"date\"]).values\n", "\n", "traindataset = traindataset_df.drop(columns=[\"date\"]).values\n", "\n", "columns_with_na = traindataset_df.columns[traindataset_df.isna().any()].tolist()\n", "columns_with_na" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "traindataset = traindataset.astype('float32')\n", "testdataset = testdataset.astype('float32')\n", "\n", "scaler = MinMaxScaler(feature_range=(0, 1))\n", "traindataset = scaler.fit_transform(traindataset)\n", "testdataset = scaler.transform(testdataset)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\jerin\\anaconda3\\envs\\smartbuilding\\Lib\\site-packages\\keras\\src\\layers\\rnn\\rnn.py:205: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. 
When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n", " super().__init__(**kwargs)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/15\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 30ms/step - loss: 0.0038\n", "Epoch 1: val_loss improved from inf to 0.00894, saving model to lstm3.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m144s\u001b[0m 44ms/step - loss: 0.0038 - val_loss: 0.0089\n", "Epoch 2/15\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 32ms/step - loss: 5.4854e-04\n", "Epoch 2: val_loss improved from 0.00894 to 0.00529, saving model to lstm3.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m137s\u001b[0m 43ms/step - loss: 5.4854e-04 - val_loss: 0.0053\n", "Epoch 3/15\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 30ms/step - loss: 5.0405e-04\n", "Epoch 3: val_loss did not improve from 0.00529\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m129s\u001b[0m 40ms/step - loss: 5.0405e-04 - val_loss: 0.0063\n", "Epoch 4/15\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 30ms/step - loss: 4.9573e-04\n", "Epoch 4: val_loss did not improve from 0.00529\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m131s\u001b[0m 41ms/step - loss: 4.9572e-04 - val_loss: 0.0061\n", "Epoch 5/15\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 31ms/step - loss: 4.9666e-04\n", "Epoch 5: val_loss did not improve from 0.00529\n", "\u001b[1m3220/3220\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m135s\u001b[0m 42ms/step - loss: 4.9665e-04 - val_loss: 0.0058\n", "Epoch 6/15\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 30ms/step - loss: 4.7853e-04\n", "Epoch 6: val_loss improved from 0.00529 to 0.00512, saving model to lstm3.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m129s\u001b[0m 40ms/step - loss: 4.7852e-04 - val_loss: 0.0051\n", "Epoch 7/15\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 29ms/step - loss: 4.3858e-04\n", "Epoch 7: val_loss improved from 0.00512 to 0.00386, saving model to lstm3.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m129s\u001b[0m 40ms/step - loss: 4.3859e-04 - val_loss: 0.0039\n", "Epoch 8/15\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 30ms/step - loss: 4.4643e-04\n", "Epoch 8: val_loss improved from 0.00386 to 0.00321, saving model to lstm3.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m129s\u001b[0m 40ms/step - loss: 4.4643e-04 - val_loss: 0.0032\n", "Epoch 9/15\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 30ms/step - loss: 4.3562e-04\n", "Epoch 9: val_loss improved from 0.00321 to 0.00267, saving model to lstm3.keras\n", "\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m130s\u001b[0m 40ms/step - loss: 4.3562e-04 - val_loss: 0.0027\n", "Epoch 10/15\n", "\u001b[1m3219/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 30ms/step - loss: 4.3336e-04\n", "Epoch 10: val_loss did not improve from 0.00267\n", "\u001b[1m3220/3220\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m130s\u001b[0m 40ms/step - loss: 4.3336e-04 - val_loss: 0.0029\n", "Epoch 11/15\n",
"[... epochs 11-15 training log truncated for readability; best val_loss 0.00245 at epoch 13, saved to lstm3.keras ...]\n",
"Epoch 15: val_loss did not improve from 0.00245\n",
"\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m129s\u001b[0m 40ms/step - loss: 4.0550e-04 - val_loss: 0.0025\n" ] },
{ "data": { "text/plain": [ "" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ],
"source": [
"train, test = traindataset, testdataset\n",
"\n",
"def create_dataset(dataset, time_step, target_cols=(56, 57)):\n",
"    \"\"\"Slice a scaled 2-D feature matrix into sliding-window samples.\n",
"\n",
"    dataset:     array of shape (n_rows, n_features); every column feeds the window.\n",
"    time_step:   number of past rows in each input window.\n",
"    target_cols: column indices predicted one step after each window\n",
"                 (default (56, 57) matches the two targets this notebook trains on).\n",
"\n",
"    Returns X of shape (n_samples, time_step, n_features) and\n",
"    Y of shape (n_samples, len(target_cols)).\n",
"    \"\"\"\n",
"    cols = list(target_cols)\n",
"    X, Y = [], []\n",
"    # Stop at len - time_step - 1 so row i + time_step always exists; this also\n",
"    # skips the last possible window, which the [61:] plotting offset relies on.\n",
"    for i in range(len(dataset) - time_step - 1):\n",
"        X.append(dataset[i:i + time_step, :])  # one whole window, all features at once\n",
"        Y.append(dataset[i + time_step, cols])\n",
"    return np.asarray(X), np.asarray(Y)\n",
"\n",
"time_step = 60\n",
"X_train, y_train = create_dataset(train, time_step)\n",
"X_test, y_test = create_dataset(test, time_step)\n",
"\n",
"# Stacked LSTM: two sequence-returning layers feed a final summarising layer,\n",
"# then a 2-unit linear head (one output per target column).\n",
"model = Sequential()\n",
"model.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))\n",
"model.add(LSTM(units=50, return_sequences=True))\n",
"model.add(LSTM(units=50))\n",
"model.add(Dense(units=2))\n",
"\n",
"model.compile(optimizer='adam', loss='mean_squared_error')\n",
"\n",
"# Checkpoint keeps only the weights with the lowest validation loss seen so far.\n",
"checkpoint_path = \"lstm3.keras\"\n",
"checkpoint_callback = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n",
"model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=15, batch_size=64, verbose=1, callbacks=[checkpoint_callback])\n"
] },
{ "cell_type": "code", "execution_count": 39, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/10\n",
"\u001b[1m3218/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0050\n",
"Epoch 1: val_loss improved from inf to 0.03991, saving model to lstm3.keras\n",
"\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m68s\u001b[0m 21ms/step - loss: 0.0050 - val_loss: 0.0399\n",
"Epoch 2/10\n", 
"[... epochs 2-10 training log truncated for readability; best val_loss 0.03753 at epoch 5, saved to lstm3.keras ...]\n",
"Epoch 10: val_loss did not improve from 0.03753\n",
"\u001b[1m3220/3220\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m68s\u001b[0m 21ms/step - loss: 0.0044 - val_loss: 0.0470\n" ] },
{ "data": { "text/plain": [ "" ] }, "execution_count": 39, "metadata": {}, "output_type": "execute_result" } ],
"source": [
"# Continue training the model already fitted above for 10 more epochs.\n",
"# NOTE: a fresh ModelCheckpoint restarts its tracked best from inf (see the\n",
"# 'improved from inf' line in the output), so it may overwrite lstm3.keras\n",
"# with weights worse than the best of the previous run.\n",
"checkpoint_callback = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n",
"model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64, verbose=1, callbacks=[checkpoint_callback])"
] },
{ "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[1m6344/6344\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m47s\u001b[0m 7ms/step\n" ] } ], "source": [ "test_predict1 = 
model.predict(X_test)  # predictions for the held-out windows, shape (n_samples, 2)\n" ] },
{ "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [
"%matplotlib qt\n",
"# Plot target column 0 only; [61:] skips the time_step + 1 leading rows that\n",
"# are consumed before the first prediction exists, aligning dates with y_test.\n",
"plt.plot(testdataset_df['date'][61:], y_test[:, 0], label='Original Testing Data', color='blue')\n",
"plt.plot(testdataset_df['date'][61:], test_predict1[:, 0], label='Predicted Testing Data', color='red', alpha=0.8)\n",
"plt.title('Testing Data - Predicted vs Actual')\n",
"plt.xlabel('Time')\n",
"plt.ylabel('Value')\n",
"plt.legend()\n",
"plt.show()"
] },
{ "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`. 
\n" ] } ],
"source": [
"# NOTE(review): HDF5 is a legacy format (see warning above); kept as .h5 because\n",
"# downstream consumers may load this exact filename. Prefer 'energy_model.keras'\n",
"# if nothing depends on the .h5 name.\n",
"model.save(\"energy_model.h5\") "
] },
{ "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[]" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [
"%matplotlib qt\n",
"# Quick visual sanity check of three raw sensor channels over time.\n",
"plt.plot(df_filtered['date'], df_filtered['hvac_S'], label='hvac_S')\n",
"plt.plot(df_filtered['date'], df_filtered['rtu_003_sf_vfd_spd_fbk_tn'], label='rtu_003_sf_vfd_spd_fbk_tn')\n",
"plt.plot(df_filtered['date'], df_filtered['zone_025_temp'], label='zone_025_temp')\n",
"plt.legend()"
] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ],
"metadata": { "kernelspec": { "display_name": "smartbuilding", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.8" } }, "nbformat": 4, "nbformat_minor": 2 }