{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "import numpy as np\n",
    "import pandas as pd #conventional alias\n",
    "import os\n",
    "import math\n",
    "from keras.models import Sequential #sequential model: layers stacked one after another\n",
    "from keras.layers import Dense#fully connected layer\n",
    "from keras.layers import LSTM #time-series recurrent layer (RNN family: rnn, gru, lstm)\n",
    "from keras.layers import Dropout #randomly disables neurons during training\n",
    "from keras.callbacks import EarlyStopping\n",
    "from sklearn.preprocessing import MinMaxScaler  #scikit-learn\n",
    "from sklearn.metrics import mean_squared_error #mean squared error\n",
    "from keras.models import load_model #model loading helper\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#Worked example of create_data_set: building the training set from 20 samples\n",
    "# 1\n",
    "# 2\n",
    "# 3\n",
    "# 4\n",
    "# 5\n",
    "# 6\n",
    "# 7\n",
    "# 8\n",
    "# 9\n",
    "# 10\n",
    "# 11\n",
    "# 12\n",
    "# 12\n",
    "# 13\n",
    "# 45\n",
    "# 58\n",
    "# 41\n",
    "# 42\n",
    "# 52\n",
    "# 12\n",
    "\n",
    "# with look_back=5 and pred_length=2: 20-5-2 = 13 windows, i = 0~12\n",
    "# 12345 -> 67\n",
    "# 23456 -> 78\n",
    "# 34567 -> 89\n",
    "# ..."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class RNNModel(object):\n",
    "    \"\"\"Train a stacked two-layer LSTM on a single-column series and forecast the next pred_length values.\"\"\"\n",
    "    def __init__(self,df,look_back=1, pred_length=1, epochs=20, batch_size=1, verbose=2, patience=10,model_name='best_model.h5',scaler_name='s.pkl'):\n",
    "        self.look_back = look_back  # number of past points in each training sample\n",
    "        self.pred_length = pred_length # number of future points to predict\n",
    "        self.epochs = epochs # training epochs; more is not always better, ~50-100 usually reaches convergence\n",
    "        self.batch_size = batch_size # samples per batch; total updates = N / batch_size * epochs\n",
    "        self.verbose = verbose # fit() log verbosity: 0 silent, 1 shows the training log\n",
    "        self.patience = patience # early stopping: stop once the loss has not improved for this many epochs\n",
    "        self.stock = df # input data (single-column DataFrame)\n",
    "        self.model_name = model_name # file name used to save the trained model\n",
    "        self.scaler_name = scaler_name # file name used to pickle the fitted scaler\n",
    "\n",
    "    #Load the data and min-max normalize it; data_frame is the series to train on, e.g. temp_df.csv\n",
    "    def access_data(self, data_frame):\n",
    "        #load the raw values\n",
    "        data_set = data_frame.values #first column as [[2],[3],[4]...]\n",
    "        data_set = data_set.astype('float32')\n",
    "        print(data_set.shape) #show the dataset shape\n",
    "        #normalize all values into the 0-1 range\n",
    "        scaler = MinMaxScaler(feature_range=(0, 1)) \n",
    "        data_set = scaler.fit_transform(data_set)\n",
    "        # reshape into X=t and Y=t+1 to obtain training and test data\n",
    "        train_x, train_y, test = self.create_data_set(data_set)\n",
    "        # reshape input to be [samples, time steps, features] so the LSTM can consume it\n",
    "        #[[1,2,3,4,5]] 1*5-->1*1*5[[[1,2,3,4,5]],[[2,3,4,5,6]]] [[6],[7]]\n",
    "        train_x = np.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1]))\n",
    "        return train_x, train_y, test, scaler #training inputs, training targets, test input, scaler\n",
    "\n",
    "    # Split the series into sliding-window samples (inputs and targets) plus the final test window\n",
    "    def create_data_set(self, data_set):\n",
    "        data_x, data_y = [], []\n",
    "        for i in range(len(data_set)-self.look_back - self.pred_length):\n",
    "            a = data_set[i:(i + self.look_back), 0]\n",
    "            data_x.append(a)\n",
    "            data_y.append(list(data_set[i + self.look_back: i + self.look_back + self.pred_length, 0]))\n",
    "        print(data_x[:2])\n",
    "        print('*********************')\n",
    "        print(data_y[:2])\n",
    "        print('*********************')\n",
    "        print( 'test:',data_set[-self.look_back:, 0].reshape(1, 1, self.look_back))\n",
    "        return np.array(data_x), np.array(data_y), data_set[-self.look_back:, 0].reshape(1, 1, self.look_back)\n",
    "    #Build the RNN: a stacked two-layer LSTM used for training\n",
    "    #Suggested sizing: 0-10k samples -> one LSTM layer with 32 units\n",
    "    #20k-100k samples -> two LSTM layers, 64+32 units\n",
    "    #100k-300k samples -> two LSTM layers, 64+64 units\n",
    "    def rnn_model(self, train_x, train_y,te_x,te_y, epochs):\n",
    "        model = Sequential()\n",
    "        model.add(LSTM(64, input_shape=(1, self.look_back), return_sequences=True))#double these layer sizes for a ~300k-sample dataset\n",
    "        model.add(LSTM(32, return_sequences=False))#double these layer sizes for a ~300k-sample dataset\n",
    "        model.add(Dense(16))#double these layer sizes for a ~300k-sample dataset\n",
    "        model.add(Dense(self.pred_length))\n",
    "        model.compile(loss='mean_squared_error', optimizer='adam',metrics=['mae','mse']) #mean absolute error, mean squared error\n",
    "#         model.summary()\n",
    "        # NOTE(review): monitors training 'loss'; since validation_data is supplied, 'val_loss' was likely intended -- confirm\n",
    "        early_stopping = EarlyStopping('loss', patience=self.patience)\n",
    "        history = model.fit(train_x, train_y,validation_data=(te_x, te_y), epochs=epochs, batch_size=self.batch_size, verbose=self.verbose, callbacks=[early_stopping])\n",
    "        return model,history\n",
    "    #Run the trained model on new data and return its predictions\n",
    "    def predict(self, model, data):\n",
    "        prediction = model.predict(data)\n",
    "        return prediction\n",
    "    #Train the model, save it together with the scaler, and produce the final forecast\n",
    "    def run(self):\n",
    "        train_x, train_y, test, scaler = self.access_data(self.stock)\n",
    "        # NOTE(review): hard-coded 10000-sample train/validation split -- assumes the series yields more than 10000 windows; TODO parameterize\n",
    "        tr_x,te_x,tr_y,te_y = train_x[:10000],train_x[10000:],train_y[:10000],train_y[10000:]\n",
    "        model,history = self.rnn_model(tr_x, tr_y,te_x,te_y,self.epochs)\n",
    "        #save the model and the scaler\n",
    "        model.save(self.model_name)\n",
    "        import pickle  # NOTE(review): local import; conventionally belongs in the top import cell\n",
    "        with open(self.scaler_name, 'wb') as fw:\n",
    "            pickle.dump(scaler, fw)\n",
    "        predict = self.predict(model, test)\n",
    "        #map the predictions back to the original scale\n",
    "        # NOTE(review): scaler was fit on 1 feature but receives a (1, pred_length) array; relies on broadcasting -- verify with the installed sklearn version\n",
    "        stock = scaler.inverse_transform(predict).reshape(self.pred_length, 1)\n",
    "        print('test predict:',stock)\n",
    "        return np.squeeze(stock) #the forecast values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style>\n",
       "    .dataframe thead tr:only-child th {\n",
       "        text-align: right;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>number</th>\n",
       "      <th>date</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>23.5</td>\n",
       "      <td>2021-09-01 00:04:54</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>23.4</td>\n",
       "      <td>2021-09-01 00:15:06</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>23.4</td>\n",
       "      <td>2021-09-01 00:25:18</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>23.3</td>\n",
       "      <td>2021-09-01 00:35:09</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>23.4</td>\n",
       "      <td>2021-09-01 00:45:21</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   number                 date\n",
       "0    23.5  2021-09-01 00:04:54\n",
       "1    23.4  2021-09-01 00:15:06\n",
       "2    23.4  2021-09-01 00:25:18\n",
       "3    23.3  2021-09-01 00:35:09\n",
       "4    23.4  2021-09-01 00:45:21"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#Take a quick look at the humidity data\n",
    "p = r'humd_df.csv'\n",
    "df = pd.read_csv(p)\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(17313, 1)\n",
      "[array([0.55      , 0.546875  , 0.546875  , 0.54375   , 0.546875  ,\n",
      "       0.54375   , 0.5375    , 0.53125   , 0.53125   , 0.528125  ,\n",
      "       0.52500004, 0.521875  , 0.521875  , 0.51875   , 0.51875   ,\n",
      "       0.521875  , 0.515625  , 0.5125    , 0.50937504, 0.503125  ,\n",
      "       0.50625   , 0.50625   , 0.503125  , 0.50625   , 0.5       ,\n",
      "       0.496875  , 0.49062502, 0.49062502, 0.4875    , 0.49062502],\n",
      "      dtype=float32), array([0.546875  , 0.546875  , 0.54375   , 0.546875  , 0.54375   ,\n",
      "       0.5375    , 0.53125   , 0.53125   , 0.528125  , 0.52500004,\n",
      "       0.521875  , 0.521875  , 0.51875   , 0.51875   , 0.521875  ,\n",
      "       0.515625  , 0.5125    , 0.50937504, 0.503125  , 0.50625   ,\n",
      "       0.50625   , 0.503125  , 0.50625   , 0.5       , 0.496875  ,\n",
      "       0.49062502, 0.49062502, 0.4875    , 0.49062502, 0.49062502],\n",
      "      dtype=float32)]\n",
      "*********************\n",
      "[[0.49062502, 0.49062502, 0.484375, 0.484375, 0.4875], [0.49062502, 0.484375, 0.484375, 0.4875, 0.4875]]\n",
      "*********************\n",
      "test: [[[0.3        0.296875   0.29062498 0.28750002 0.28750002 0.28125\n",
      "   0.28125    0.278125   0.27187502 0.27187502 0.26875    0.265625\n",
      "   0.2625     0.2625     0.2625     0.2625     0.265625   0.25937498\n",
      "   0.25625002 0.253125   0.253125   0.253125   0.25625002 0.253125\n",
      "   0.24999999 0.246875   0.24374999 0.24374999 0.24062501 0.2375    ]]]\n",
      "Train on 10000 samples, validate on 7278 samples\n",
      "Epoch 1/30\n",
      "10000/10000 [==============================] - 1s 134us/step - loss: 0.0284 - mae: 0.1031 - mse: 0.0284 - val_loss: 0.0051 - val_mae: 0.0631 - val_mse: 0.0051\n",
      "Epoch 2/30\n",
      "10000/10000 [==============================] - 1s 67us/step - loss: 0.0029 - mae: 0.0345 - mse: 0.0029 - val_loss: 0.0025 - val_mae: 0.0431 - val_mse: 0.0025\n",
      "Epoch 3/30\n",
      "10000/10000 [==============================] - 1s 69us/step - loss: 0.0019 - mae: 0.0252 - mse: 0.0019 - val_loss: 0.0018 - val_mae: 0.0352 - val_mse: 0.0018\n",
      "Epoch 4/30\n",
      "10000/10000 [==============================] - 1s 67us/step - loss: 0.0017 - mae: 0.0230 - mse: 0.0017 - val_loss: 0.0013 - val_mae: 0.0295 - val_mse: 0.0013\n",
      "Epoch 5/30\n",
      "10000/10000 [==============================] - 1s 69us/step - loss: 0.0015 - mae: 0.0216 - mse: 0.0015 - val_loss: 0.0010 - val_mae: 0.0249 - val_mse: 0.0010\n",
      "Epoch 6/30\n",
      "10000/10000 [==============================] - 1s 72us/step - loss: 0.0014 - mae: 0.0199 - mse: 0.0014 - val_loss: 9.7542e-04 - val_mae: 0.0244 - val_mse: 9.7542e-04\n",
      "Epoch 7/30\n",
      "10000/10000 [==============================] - 1s 72us/step - loss: 0.0013 - mae: 0.0197 - mse: 0.0013 - val_loss: 7.7793e-04 - val_mae: 0.0201 - val_mse: 7.7793e-04\n",
      "Epoch 8/30\n",
      "10000/10000 [==============================] - 1s 69us/step - loss: 0.0012 - mae: 0.0188 - mse: 0.0012 - val_loss: 6.3688e-04 - val_mae: 0.0173 - val_mse: 6.3688e-04\n",
      "Epoch 9/30\n",
      "10000/10000 [==============================] - 1s 68us/step - loss: 0.0012 - mae: 0.0190 - mse: 0.0012 - val_loss: 5.9641e-04 - val_mae: 0.0163 - val_mse: 5.9641e-04\n",
      "Epoch 10/30\n",
      "10000/10000 [==============================] - 1s 71us/step - loss: 0.0012 - mae: 0.0182 - mse: 0.0012 - val_loss: 6.7820e-04 - val_mae: 0.0181 - val_mse: 6.7820e-04\n",
      "Epoch 11/30\n",
      "10000/10000 [==============================] - 1s 69us/step - loss: 0.0011 - mae: 0.0181 - mse: 0.0011 - val_loss: 5.4313e-04 - val_mae: 0.0154 - val_mse: 5.4314e-04\n",
      "Epoch 12/30\n",
      "10000/10000 [==============================] - 1s 67us/step - loss: 0.0011 - mae: 0.0177 - mse: 0.0011 - val_loss: 7.0793e-04 - val_mae: 0.0193 - val_mse: 7.0793e-04\n",
      "Epoch 13/30\n",
      "10000/10000 [==============================] - 1s 70us/step - loss: 0.0011 - mae: 0.0181 - mse: 0.0011 - val_loss: 5.4336e-04 - val_mae: 0.0152 - val_mse: 5.4336e-04\n",
      "Epoch 14/30\n",
      "10000/10000 [==============================] - 1s 68us/step - loss: 0.0011 - mae: 0.0176 - mse: 0.0011 - val_loss: 5.3393e-04 - val_mae: 0.0151 - val_mse: 5.3393e-04\n",
      "Epoch 15/30\n",
      "10000/10000 [==============================] - 1s 73us/step - loss: 0.0011 - mae: 0.0176 - mse: 0.0011 - val_loss: 5.0928e-04 - val_mae: 0.0148 - val_mse: 5.0928e-04\n",
      "Epoch 16/30\n",
      "10000/10000 [==============================] - 1s 78us/step - loss: 0.0011 - mae: 0.0181 - mse: 0.0011 - val_loss: 5.0869e-04 - val_mae: 0.0148 - val_mse: 5.0869e-04\n",
      "Epoch 17/30\n",
      "10000/10000 [==============================] - 1s 70us/step - loss: 0.0011 - mae: 0.0176 - mse: 0.0011 - val_loss: 4.9879e-04 - val_mae: 0.0145 - val_mse: 4.9879e-04\n",
      "Epoch 18/30\n",
      "10000/10000 [==============================] - 1s 69us/step - loss: 0.0011 - mae: 0.0182 - mse: 0.0011 - val_loss: 5.0345e-04 - val_mae: 0.0146 - val_mse: 5.0345e-04\n",
      "Epoch 19/30\n",
      "10000/10000 [==============================] - 1s 68us/step - loss: 0.0011 - mae: 0.0171 - mse: 0.0011 - val_loss: 5.9171e-04 - val_mae: 0.0162 - val_mse: 5.9171e-04\n",
      "Epoch 20/30\n",
      "10000/10000 [==============================] - 1s 68us/step - loss: 0.0011 - mae: 0.0185 - mse: 0.0011 - val_loss: 5.1358e-04 - val_mae: 0.0146 - val_mse: 5.1358e-04\n",
      "Epoch 21/30\n",
      "10000/10000 [==============================] - 1s 69us/step - loss: 0.0011 - mae: 0.0176 - mse: 0.0011 - val_loss: 5.1617e-04 - val_mae: 0.0154 - val_mse: 5.1617e-04\n",
      "Epoch 22/30\n",
      "10000/10000 [==============================] - 1s 68us/step - loss: 0.0011 - mae: 0.0178 - mse: 0.0011 - val_loss: 4.9265e-04 - val_mae: 0.0148 - val_mse: 4.9265e-04\n",
      "Epoch 23/30\n",
      "10000/10000 [==============================] - 1s 67us/step - loss: 0.0011 - mae: 0.0176 - mse: 0.0011 - val_loss: 5.1114e-04 - val_mae: 0.0151 - val_mse: 5.1114e-04\n",
      "Epoch 24/30\n",
      "10000/10000 [==============================] - 1s 68us/step - loss: 0.0010 - mae: 0.0169 - mse: 0.0010 - val_loss: 5.2458e-04 - val_mae: 0.0152 - val_mse: 5.2458e-04\n",
      "Epoch 25/30\n",
      "10000/10000 [==============================] - 1s 68us/step - loss: 0.0010 - mae: 0.0173 - mse: 0.0010 - val_loss: 5.0364e-04 - val_mae: 0.0150 - val_mse: 5.0364e-04\n",
      "Epoch 26/30\n",
      "10000/10000 [==============================] - 1s 68us/step - loss: 0.0010 - mae: 0.0172 - mse: 0.0010 - val_loss: 4.7858e-04 - val_mae: 0.0143 - val_mse: 4.7858e-04\n",
      "Epoch 27/30\n",
      "10000/10000 [==============================] - 1s 69us/step - loss: 0.0010 - mae: 0.0173 - mse: 0.0010 - val_loss: 4.8136e-04 - val_mae: 0.0147 - val_mse: 4.8136e-04\n",
      "Epoch 28/30\n",
      "10000/10000 [==============================] - 1s 67us/step - loss: 0.0010 - mae: 0.0167 - mse: 0.0010 - val_loss: 5.4704e-04 - val_mae: 0.0156 - val_mse: 5.4704e-04\n",
      "Epoch 29/30\n",
      "10000/10000 [==============================] - 1s 67us/step - loss: 0.0010 - mae: 0.0172 - mse: 0.0010 - val_loss: 5.0954e-04 - val_mae: 0.0150 - val_mse: 5.0954e-04\n",
      "Epoch 30/30\n",
      "10000/10000 [==============================] - 1s 66us/step - loss: 0.0010 - mae: 0.0174 - mse: 0.0010 - val_loss: 4.7419e-04 - val_mae: 0.0150 - val_mse: 4.7419e-04\n",
      "test predict: [[13.734245]\n",
      " [13.638405]\n",
      " [13.410933]\n",
      " [13.342848]\n",
      " [13.025597]]\n",
      "[13.734245 13.638405 13.410933 13.342848 13.025597]\n"
     ]
    }
   ],
   "source": [
    "#Train the humidity model and save it\n",
    "p = r'humd_df.csv'\n",
    "df = pd.read_csv(p)\n",
    "t = pd.DataFrame(df['number'])\n",
    "initiation = RNNModel(t,look_back=30, pred_length=5, epochs=30, batch_size=64, verbose=1, patience=10,model_name='humd_model.h5',scaler_name='humd_s.pkl')\n",
    "humd_preds = initiation.run()\n",
    "print(humd_preds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Train the stem model and save it\n",
    "p = r'stem_df.csv'\n",
    "df = pd.read_csv(p)\n",
    "t = pd.DataFrame(df['number'])\n",
    "# look_back: number of past points per training sample\n",
    "# pred_length: number of points to predict\n",
    "# epochs: number of training epochs\n",
    "# batch_size: samples per batch (drawn from the source data)\n",
    "initiation = RNNModel(t,look_back=50, pred_length=20, epochs=60, batch_size=64, verbose=1, patience=10,model_name='stem_model.h5',scaler_name='stem_s.pkl')\n",
    "stem_preds = initiation.run()\n",
    "print(stem_preds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import pickle\n",
    "\n",
    "#Load the saved scaler\n",
    "with open('humd_s.pkl', 'rb') as fr:\n",
    "    humd_s = pickle.load(fr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "humd_s.transform([[86.2]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
