{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "piano-million",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "# Imports — duplicates of numpy/pandas removed, grouped by origin\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "from sklearn.model_selection import train_test_split  # data partitioning\n",
    "from sklearn import preprocessing\n",
    "from sklearn.metrics import confusion_matrix, recall_score, precision_score\n",
    "\n",
    "# Setting seed for reproducibility\n",
    "np.random.seed(1234)\n",
    "PYTHONHASHSEED = 0\n",
    "\n",
    "import keras\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Dense, Dropout, LSTM, Activation\n",
    "from keras.callbacks import ReduceLROnPlateau\n",
    "\n",
    "# Local helpers for PCA dimensionality reduction\n",
    "from wPCA import PCAcomponent\n",
    "from wPCA import PCApercent\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "sustained-printer",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Column schema shared by the train and test files (space-separated, headerless);\n",
    "# defined once instead of being duplicated for each frame.\n",
    "DATA_COLUMNS = ['id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',\n",
    "       's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',\n",
    "       's15', 's16', 's17', 's18', 's19', 's20', 's21', 'RUL', 'label1',\n",
    "       'label2', 'cycle_norm']\n",
    "\n",
    "train_df = pd.read_csv('newmerge1.txt', sep=\" \", header=None)\n",
    "train_df.columns = DATA_COLUMNS\n",
    "test_df = pd.read_csv('newmerge2.txt', sep=\" \", header=None)\n",
    "test_df.columns = DATA_COLUMNS"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "considered-relation",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>cycle</th>\n",
       "      <th>setting1</th>\n",
       "      <th>setting2</th>\n",
       "      <th>setting3</th>\n",
       "      <th>s1</th>\n",
       "      <th>s2</th>\n",
       "      <th>s3</th>\n",
       "      <th>s4</th>\n",
       "      <th>s5</th>\n",
       "      <th>...</th>\n",
       "      <th>s16</th>\n",
       "      <th>s17</th>\n",
       "      <th>s18</th>\n",
       "      <th>s19</th>\n",
       "      <th>s20</th>\n",
       "      <th>s21</th>\n",
       "      <th>RUL</th>\n",
       "      <th>label1</th>\n",
       "      <th>label2</th>\n",
       "      <th>cycle_norm</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.46</td>\n",
       "      <td>0.17</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.18</td>\n",
       "      <td>0.41</td>\n",
       "      <td>0.31</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.33</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.71</td>\n",
       "      <td>0.72</td>\n",
       "      <td>191.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.00</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.61</td>\n",
       "      <td>0.25</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.28</td>\n",
       "      <td>0.45</td>\n",
       "      <td>0.35</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.33</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.67</td>\n",
       "      <td>0.73</td>\n",
       "      <td>190.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.00</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>0.25</td>\n",
       "      <td>0.75</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.34</td>\n",
       "      <td>0.37</td>\n",
       "      <td>0.37</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.17</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.63</td>\n",
       "      <td>0.62</td>\n",
       "      <td>189.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.01</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>0.54</td>\n",
       "      <td>0.50</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.34</td>\n",
       "      <td>0.26</td>\n",
       "      <td>0.33</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.33</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.57</td>\n",
       "      <td>0.66</td>\n",
       "      <td>188.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.01</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1.0</td>\n",
       "      <td>5.0</td>\n",
       "      <td>0.39</td>\n",
       "      <td>0.33</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.35</td>\n",
       "      <td>0.26</td>\n",
       "      <td>0.40</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.42</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.59</td>\n",
       "      <td>0.70</td>\n",
       "      <td>187.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.01</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 30 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "    id  cycle  setting1  setting2  setting3   s1    s2    s3    s4   s5  ...  \\\n",
       "0  1.0    1.0      0.46      0.17       0.0  0.0  0.18  0.41  0.31  0.0  ...   \n",
       "1  1.0    2.0      0.61      0.25       0.0  0.0  0.28  0.45  0.35  0.0  ...   \n",
       "2  1.0    3.0      0.25      0.75       0.0  0.0  0.34  0.37  0.37  0.0  ...   \n",
       "3  1.0    4.0      0.54      0.50       0.0  0.0  0.34  0.26  0.33  0.0  ...   \n",
       "4  1.0    5.0      0.39      0.33       0.0  0.0  0.35  0.26  0.40  0.0  ...   \n",
       "\n",
       "   s16   s17  s18  s19   s20   s21    RUL  label1  label2  cycle_norm  \n",
       "0  0.0  0.33  0.0  0.0  0.71  0.72  191.0     0.0     0.0        0.00  \n",
       "1  0.0  0.33  0.0  0.0  0.67  0.73  190.0     0.0     0.0        0.00  \n",
       "2  0.0  0.17  0.0  0.0  0.63  0.62  189.0     0.0     0.0        0.01  \n",
       "3  0.0  0.33  0.0  0.0  0.57  0.66  188.0     0.0     0.0        0.01  \n",
       "4  0.0  0.42  0.0  0.0  0.59  0.70  187.0     0.0     0.0        0.01  \n",
       "\n",
       "[5 rows x 30 columns]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "worthy-benefit",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(20631, 30)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_df.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "manufactured-calendar",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pearson-correlation-based row filter\n",
    "def Pearson(A, n):\n",
    "    \"\"\"Drop rows whose correlation-derived score against the next row is below n.\n",
    "\n",
    "    For each consecutive pair of rows, a score is derived from the Pearson\n",
    "    correlation coefficient of the pair plus a small random jitter; rows with\n",
    "    score < n are removed.\n",
    "\n",
    "    A: DataFrame/array with the 30-column engine-data schema.\n",
    "    n: integer threshold; rows scoring below it are dropped.\n",
    "    Returns a new DataFrame with the original column names restored.\n",
    "    \"\"\"\n",
    "    import random\n",
    "    rows = np.array(A)\n",
    "    drop_idx = []\n",
    "    for i in range(len(rows) - 1):\n",
    "        pair = np.vstack([rows[i, :], rows[i + 1, :]])\n",
    "        # Score: low decimal digit of the scaled correlation, plus jitter in [-1, 1].\n",
    "        # NOTE(review): the jitter makes this filter non-deterministic; the\n",
    "        # deterministic variant (no jitter) was: (np.corrcoef(X)[0][1]*100000)%10\n",
    "        score = (np.corrcoef(pair)[0][1] * 100000) % 10 + random.randint(-1, 1)\n",
    "        if score < n:\n",
    "            drop_idx.append(i)\n",
    "    # Dead statement `np.array(list2)` from the original removed (result was discarded).\n",
    "    filtered = np.delete(rows, np.array(drop_idx), 0)\n",
    "    result = pd.DataFrame(filtered)\n",
    "    result.columns = ['id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',\n",
    "       's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',\n",
    "       's15', 's16', 's17', 's18', 's19', 's20', 's21', 'RUL', 'label1',\n",
    "       'label2', 'cycle_norm']\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "annoying-circulation",
   "metadata": {},
   "outputs": [],
   "source": [
    "def WanDuZi(A, n):\n",
    "    \"\"\"Replace the 21 raw sensor columns of A with n PCA components.\n",
    "\n",
    "    A: DataFrame containing sensor columns 's1'..'s21'. NOTE: A is mutated in\n",
    "       place — the sensor columns are dropped from it.\n",
    "    n: number of principal components to keep.\n",
    "    Returns A joined with new component columns 'a1'..'a<n>'.\n",
    "    \"\"\"\n",
    "    sensor_cols = ['s' + str(i) for i in range(1, 22)]\n",
    "    sensor_mat = np.mat(A[sensor_cols])\n",
    "    pca = PCAcomponent(sensor_mat, n)\n",
    "    pca.fit()\n",
    "    # Reduced representation as a DataFrame with columns a1..a<n>.\n",
    "    # (The original assigned these column names twice and had a dead bare\n",
    "    # expression between the assignments; both redundancies removed.)\n",
    "    components = pd.DataFrame(np.array(pca.low_dataMat))\n",
    "    components.columns = ['a' + str(i) for i in range(1, n + 1)]\n",
    "    # Drop raw sensors in place (same list as sensor_cols — original rebuilt it as b3).\n",
    "    A.drop(sensor_cols, axis=1, inplace=True)\n",
    "    return A.join(components)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "transsexual-pearl",
   "metadata": {},
   "outputs": [],
   "source": [
    "# function to reshape features into (samples, time steps, features) \n",
    "def gen_sequence(id_df, seq_length, seq_cols):\n",
    "    \"\"\" Only sequences that meet the window-length are considered, no padding is used. This means for testing\n",
    "    we need to drop those which are below the window-length. An alternative would be to pad sequences so that\n",
    "    we can use shorter ones \"\"\"\n",
    "    data_array = id_df[seq_cols].values\n",
    "    num_elements = data_array.shape[0]\n",
    "    # Slide a window of seq_length rows over the data, one step at a time;\n",
    "    # yields (num_elements - seq_length) windows of shape (seq_length, len(seq_cols)).\n",
    "    for start, stop in zip(range(0, num_elements-seq_length), range(seq_length, num_elements)):\n",
    "        yield data_array[start:stop, :]\n",
    "        \n",
    "# function to generate labels\n",
    "def gen_labels(id_df, seq_length, label):\n",
    "    \"\"\"Return the label rows aligned with the windows from gen_sequence: the first\n",
    "    seq_length rows have no complete window ending at them, so they are skipped.\n",
    "    `label` is a list of column names, so the result is 2-D: (num_windows, len(label)).\"\"\"\n",
    "    data_array = id_df[label].values\n",
    "    num_elements = data_array.shape[0]\n",
    "    return data_array[seq_length:num_elements, :]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "diverse-place",
   "metadata": {},
   "outputs": [],
   "source": [
    "# train_df = Pearson(train_df,1)\n",
    "# print(train_df.shape)\n",
    "\n",
    "# traindf = WanDuZi(train_df,15)\n",
    "\n",
    "# train_set2,test_set2=np.split(traindf,[int(len(traindf)*0.8)])  # 80% train / 20% test split\n",
    "# print(len(train_set2),len(test_set2))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "thick-break",
   "metadata": {},
   "outputs": [],
   "source": [
    "# train_set2.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "rising-karaoke",
   "metadata": {},
   "outputs": [],
   "source": [
    "# sequence_length = 50\n",
    "# # pick the feature columns \n",
    "# sensor_cols = ['a' + str(k) for k in range(1,int(15)+1)]\n",
    "# sequence_cols = ['setting1', 'setting2', 'setting3', 'cycle_norm']\n",
    "# sequence_cols.extend(sensor_cols)\n",
    "# # generator for the sequences\n",
    "# seq_gen = (list(gen_sequence(train_set2[train_set2['id']==id], sequence_length, sequence_cols)) \n",
    "#                    for id in train_set2['id'].unique())\n",
    "# # generate sequences and convert to numpy array\n",
    "# seq_array = np.concatenate(list(seq_gen)).astype(np.float32)\n",
    "# seq_array.shape\n",
    "# # generate labels\n",
    "# label_gen = [gen_labels(train_set2[train_set2['id']==id], sequence_length, ['label1']) \n",
    "#                      for id in train_set2['id'].unique()]\n",
    "# label_array = np.concatenate(label_gen).astype(np.float32)\n",
    "# label_array.shape\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "lasting-arthur",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # build the network\n",
    "# nb_features = seq_array.shape[2]\n",
    "# nb_out = label_array.shape[1]\n",
    "\n",
    "# model = Sequential()\n",
    "\n",
    "# model.add(LSTM(input_shape=(sequence_length, nb_features),\n",
    "#                                              units=120,\n",
    "#                                              return_sequences=True))\n",
    "\n",
    "# model.add(Dropout(0.25))#0.2-》0.25 0.956989\t0.920000\t0.92\t0.920000\n",
    "\n",
    "# model.add(LSTM(units=50,return_sequences=False))\n",
    "# model.add(Dropout(0.25))\n",
    "\n",
    "#                     #待定 实践不行\n",
    "#                     # model.add(Flatten())\n",
    "#                     # model.add(LeakyReLU(alpha=0.2))\n",
    "\n",
    "\n",
    "\n",
    "# model.add(Dense(units=nb_out, activation='sigmoid'))\n",
    "# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
    "\n",
    "# reduce1 = ReduceLROnPlateau(monitor='val_loss',\n",
    "#                                              factor=0.1,\n",
    "#                                              patience=2,\n",
    "#                                              verbose=0,\n",
    "#                                              mode='auto',\n",
    "#                                              min_delta=0,\n",
    "#                                              cooldown=0,\n",
    "#                                              min_lr=0)\n",
    "# model.fit(seq_array, label_array, epochs=10, batch_size=200, validation_split=0.05, verbose=1,\n",
    "#                               callbacks = [keras.callbacks.EarlyStopping(monitor='accuracy', min_delta=0, patience=0, verbose=0, mode='auto'),reduce1])\n",
    "        \n",
    "# # training metrics\n",
    "# scores = model.evaluate(seq_array, label_array, verbose=1, batch_size=200)\n",
    "# print('Accurracy: {}'.format(scores[1]))\n",
    "# # make predictions and compute confusion matrix\n",
    "# y_pred = model.predict_classes(seq_array,verbose=1, batch_size=200)\n",
    "# y_true = label_array\n",
    "# print('Confusion matrix\\n- x-axis is true labels.\\n- y-axis is predicted labels')\n",
    "# cm = confusion_matrix(y_true, y_pred)\n",
    "# cm\n",
    "# # compute precision and recall\n",
    "# precision = precision_score(y_true, y_pred)\n",
    "# recall = recall_score(y_true, y_pred)\n",
    "# print( 'precision = ', precision, '\\n', 'recall = ', recall)\n",
    "# seq_array_test_last = [test_set2[test_set2['id']==id][sequence_cols].values[-sequence_length:] \n",
    "#                                for id in test_set2['id'].unique() if len(test_set2[test_set2['id']==id]) >= sequence_length]\n",
    "\n",
    "# seq_array_test_last = np.asarray(seq_array_test_last).astype(np.float32)\n",
    "# seq_array_test_last.shape\n",
    "\n",
    "# y_mask = [len(test_set2[test_set2['id']==id]) >= sequence_length for id in test_set2['id'].unique()]\n",
    "\n",
    "# # test metrics\n",
    "# scores_test = model.evaluate(seq_array_test_last, label_array_test_last, verbose=2)\n",
    "# print('Accurracy: {}'.format(scores_test[1]))\n",
    "# # make predictions and compute confusion matrix\n",
    "# y_pred_test = model.predict_classes(seq_array_test_last)\n",
    "# y_true_test = label_array_test_last\n",
    "# print('Confusion matrix\\n- x-axis is true labels.\\n- y-axis is predicted labels')\n",
    "# cm = confusion_matrix(y_true_test, y_pred_test)\n",
    "# cm\n",
    "\n",
    "# # compute precision and recall\n",
    "# precision_test = precision_score(y_true_test, y_pred_test)\n",
    "# recall_test = recall_score(y_true_test, y_pred_test)\n",
    "# f1_test = 2 * (precision_test * recall_test) / (precision_test + recall_test)\n",
    "# print( 'Precision: ', precision_test, '\\n', 'Recall: ', recall_test,'\\n', 'F1-score:', f1_test )\n",
    "\n",
    "# results_df = pd.DataFrame([[scores_test[1],precision_test,recall_test,f1_test],\n",
    "#                                   [0.94, 0.952381, 0.8, 0.869565]],\n",
    "#                                  columns = ['Accuracy', 'Precision', 'Recall', 'F1-score'],\n",
    "#                                  index = ['LSTM',\n",
    "#                                          'Template Best Model'])\n",
    "# results_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "automated-heater",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13371 7201\n",
      "Train on 9519 samples, validate on 502 samples\n",
      "Epoch 1/10\n",
      "9519/9519 [==============================] - 7s 777us/step - loss: 0.2676 - accuracy: 0.8892 - val_loss: 0.2974 - val_accuracy: 0.8725\n",
      "Epoch 2/10\n",
      "9519/9519 [==============================] - 7s 683us/step - loss: 0.1177 - accuracy: 0.9512 - val_loss: 0.1189 - val_accuracy: 0.9522\n",
      "Epoch 3/10\n",
      "9519/9519 [==============================] - 7s 687us/step - loss: 0.0992 - accuracy: 0.9599 - val_loss: 0.1204 - val_accuracy: 0.9602\n",
      "Epoch 4/10\n",
      "9519/9519 [==============================] - 7s 687us/step - loss: 0.0770 - accuracy: 0.9689 - val_loss: 0.1339 - val_accuracy: 0.9482\n",
      "Epoch 5/10\n",
      "9519/9519 [==============================] - 7s 684us/step - loss: 0.0575 - accuracy: 0.9774 - val_loss: 0.1303 - val_accuracy: 0.9502\n",
      "Epoch 6/10\n",
      "9519/9519 [==============================] - 7s 702us/step - loss: 0.0522 - accuracy: 0.9783 - val_loss: 0.1184 - val_accuracy: 0.9542\n",
      "Epoch 7/10\n",
      "9519/9519 [==============================] - 7s 686us/step - loss: 0.0493 - accuracy: 0.9805 - val_loss: 0.1150 - val_accuracy: 0.9582\n",
      "Epoch 8/10\n",
      "9519/9519 [==============================] - 7s 684us/step - loss: 0.0470 - accuracy: 0.9810 - val_loss: 0.1257 - val_accuracy: 0.9582\n",
      "Epoch 9/10\n",
      "9519/9519 [==============================] - 7s 683us/step - loss: 0.0468 - accuracy: 0.9809 - val_loss: 0.1175 - val_accuracy: 0.9602\n",
      "10021/10021 [==============================] - 2s 190us/step\n",
      "Accurracy: 0.9822372794151306\n",
      "10021/10021 [==============================] - 2s 195us/step\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "precision =  0.971028971028971 \n",
      " recall =  0.9418604651162791\n",
      "Accurracy: 0.9354838728904724\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "Precision:  0.8518518518518519 \n",
      " Recall:  0.92 \n",
      " F1-score: 0.8846153846153846\n",
      "13369 7199\n",
      "Train on 9518 samples, validate on 501 samples\n",
      "Epoch 1/10\n",
      "9518/9518 [==============================] - 7s 744us/step - loss: 0.2847 - accuracy: 0.8916 - val_loss: 0.3408 - val_accuracy: 0.8543\n",
      "Epoch 2/10\n",
      "9518/9518 [==============================] - 7s 688us/step - loss: 0.1152 - accuracy: 0.9537 - val_loss: 0.1326 - val_accuracy: 0.9541\n",
      "Epoch 3/10\n",
      "9518/9518 [==============================] - 7s 687us/step - loss: 0.0803 - accuracy: 0.9687 - val_loss: 0.1834 - val_accuracy: 0.9521\n",
      "Epoch 4/10\n",
      "9518/9518 [==============================] - 7s 685us/step - loss: 0.0878 - accuracy: 0.9656 - val_loss: 0.1078 - val_accuracy: 0.9621\n",
      "10019/10019 [==============================] - 2s 191us/step\n",
      "Accurracy: 0.9708553552627563\n",
      "10019/10019 [==============================] - 2s 195us/step\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "precision =  0.9162768942937325 \n",
      " recall =  0.9454633204633205\n",
      "Accurracy: 0.7204301357269287\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "Precision:  0.0 \n",
      " Recall:  0.0 \n",
      " F1-score: nan\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\YCX\\.conda\\envs\\tensor1\\lib\\site-packages\\ipykernel_launcher.py:107: RuntimeWarning: invalid value encountered in double_scalars\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13371 7200\n",
      "Train on 9519 samples, validate on 502 samples\n",
      "Epoch 1/10\n",
      "9519/9519 [==============================] - 7s 781us/step - loss: 0.2805 - accuracy: 0.8771 - val_loss: 0.3707 - val_accuracy: 0.8147\n",
      "Epoch 2/10\n",
      "9519/9519 [==============================] - 7s 705us/step - loss: 0.1315 - accuracy: 0.9471 - val_loss: 0.1002 - val_accuracy: 0.9622\n",
      "Epoch 3/10\n",
      "9519/9519 [==============================] - 7s 730us/step - loss: 0.0884 - accuracy: 0.9642 - val_loss: 0.2071 - val_accuracy: 0.9303\n",
      "Epoch 4/10\n",
      "9519/9519 [==============================] - 7s 688us/step - loss: 0.0763 - accuracy: 0.9697 - val_loss: 0.1031 - val_accuracy: 0.9542\n",
      "Epoch 5/10\n",
      "9519/9519 [==============================] - 7s 699us/step - loss: 0.0598 - accuracy: 0.9784 - val_loss: 0.1099 - val_accuracy: 0.9582\n",
      "Epoch 6/10\n",
      "9519/9519 [==============================] - 7s 690us/step - loss: 0.0546 - accuracy: 0.9763 - val_loss: 0.1148 - val_accuracy: 0.9562\n",
      "10021/10021 [==============================] - 2s 195us/step\n",
      "Accurracy: 0.9791437983512878\n",
      "10021/10021 [==============================] - 2s 205us/step\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "precision =  0.9535679374389052 \n",
      " recall =  0.9447941888619855\n",
      "Accurracy: 0.7096773982048035\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "Precision:  0.0 \n",
      " Recall:  0.0 \n",
      " F1-score: nan\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\YCX\\.conda\\envs\\tensor1\\lib\\site-packages\\ipykernel_launcher.py:107: RuntimeWarning: invalid value encountered in double_scalars\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13360 7194\n",
      "Train on 9509 samples, validate on 501 samples\n",
      "Epoch 1/10\n",
      "9509/9509 [==============================] - 7s 759us/step - loss: 0.2742 - accuracy: 0.8814 - val_loss: 0.3942 - val_accuracy: 0.8204\n",
      "Epoch 2/10\n",
      "9509/9509 [==============================] - 7s 699us/step - loss: 0.1294 - accuracy: 0.9473 - val_loss: 0.2755 - val_accuracy: 0.9361\n",
      "Epoch 3/10\n",
      "9509/9509 [==============================] - 7s 703us/step - loss: 0.0938 - accuracy: 0.9622 - val_loss: 0.2293 - val_accuracy: 0.8842\n",
      "Epoch 4/10\n",
      "9509/9509 [==============================] - 7s 701us/step - loss: 0.0750 - accuracy: 0.9698 - val_loss: 0.0786 - val_accuracy: 0.9621\n",
      "Epoch 5/10\n",
      "9509/9509 [==============================] - 7s 698us/step - loss: 0.0638 - accuracy: 0.9737 - val_loss: 0.1063 - val_accuracy: 0.9581\n",
      "Epoch 6/10\n",
      "9509/9509 [==============================] - 7s 705us/step - loss: 0.0607 - accuracy: 0.9746 - val_loss: 0.1240 - val_accuracy: 0.9561\n",
      "Epoch 7/10\n",
      "9509/9509 [==============================] - 7s 700us/step - loss: 0.0465 - accuracy: 0.9813 - val_loss: 0.0902 - val_accuracy: 0.9641\n",
      "Epoch 8/10\n",
      "9509/9509 [==============================] - 7s 697us/step - loss: 0.0414 - accuracy: 0.9832 - val_loss: 0.1128 - val_accuracy: 0.9581\n",
      "Epoch 9/10\n",
      "9509/9509 [==============================] - 7s 700us/step - loss: 0.0394 - accuracy: 0.9838 - val_loss: 0.1220 - val_accuracy: 0.9541\n",
      "Epoch 10/10\n",
      "9509/9509 [==============================] - 7s 721us/step - loss: 0.0391 - accuracy: 0.9834 - val_loss: 0.1241 - val_accuracy: 0.9581\n",
      "10010/10010 [==============================] - 2s 213us/step\n",
      "Accurracy: 0.9822177886962891\n",
      "10010/10010 [==============================] - 2s 210us/step\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "precision =  0.9660600098376783 \n",
      " recall =  0.9474191992281717\n",
      "Accurracy: 0.698924720287323\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "Precision:  0.0 \n",
      " Recall:  0.0 \n",
      " F1-score: nan\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\YCX\\.conda\\envs\\tensor1\\lib\\site-packages\\ipykernel_launcher.py:107: RuntimeWarning: invalid value encountered in double_scalars\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13360 7194\n",
      "Train on 9509 samples, validate on 501 samples\n",
      "Epoch 1/10\n",
      "9509/9509 [==============================] - 8s 814us/step - loss: 0.2770 - accuracy: 0.8845 - val_loss: 0.2393 - val_accuracy: 0.9122\n",
      "Epoch 2/10\n",
      "9509/9509 [==============================] - 7s 783us/step - loss: 0.1267 - accuracy: 0.9513 - val_loss: 0.1519 - val_accuracy: 0.9242\n",
      "Epoch 3/10\n",
      "9509/9509 [==============================] - 7s 773us/step - loss: 0.0810 - accuracy: 0.9658 - val_loss: 0.2299 - val_accuracy: 0.9261\n",
      "Epoch 4/10\n",
      "9509/9509 [==============================] - 7s 739us/step - loss: 0.0815 - accuracy: 0.9670 - val_loss: 0.1454 - val_accuracy: 0.9501\n",
      "Epoch 5/10\n",
      "9509/9509 [==============================] - 7s 762us/step - loss: 0.0692 - accuracy: 0.9720 - val_loss: 0.1398 - val_accuracy: 0.9501\n",
      "Epoch 6/10\n",
      "9509/9509 [==============================] - 7s 754us/step - loss: 0.0661 - accuracy: 0.9721 - val_loss: 0.1329 - val_accuracy: 0.9521\n",
      "Epoch 7/10\n",
      "9509/9509 [==============================] - 7s 778us/step - loss: 0.0578 - accuracy: 0.9750 - val_loss: 0.1480 - val_accuracy: 0.9501\n",
      "Epoch 8/10\n",
      "9509/9509 [==============================] - 8s 796us/step - loss: 0.0527 - accuracy: 0.9777 - val_loss: 0.1153 - val_accuracy: 0.9581\n",
      "Epoch 9/10\n",
      "9509/9509 [==============================] - 7s 749us/step - loss: 0.0552 - accuracy: 0.9773 - val_loss: 0.1274 - val_accuracy: 0.9501\n",
      "10010/10010 [==============================] - 2s 204us/step\n",
      "Accurracy: 0.9795204997062683\n",
      "10010/10010 [==============================] - 2s 209us/step\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "precision =  0.9900262467191601 \n",
      " recall =  0.9102316602316602\n",
      "Accurracy: 0.7096773982048035\n",
      "Confusion matrix\n",
      "- x-axis is true labels.\n",
      "- y-axis is predicted labels\n",
      "Precision:  0.0 \n",
      " Recall:  0.0 \n",
      " F1-score: nan\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\YCX\\.conda\\envs\\tensor1\\lib\\site-packages\\ipykernel_launcher.py:107: RuntimeWarning: invalid value encountered in double_scalars\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13367 7198\n",
      "Train on 9516 samples, validate on 501 samples\n",
      "Epoch 1/10\n",
      "1200/9516 [==>...........................] - ETA: 10s - loss: 0.5919 - accuracy: 0.7408"
     ]
    }
   ],
   "source": [
    "best_score = 0\n",
    "best_precision = 0\n",
    "best_div =0\n",
    "for n in range(10,21):\n",
    "    for j in range(0,4):\n",
    "        for pp in [0.5,0.55,0.6,0.65,0.7,0.75,0.8]:\n",
    "            train_df1 = Pearson(train_df,j)\n",
    "            test_df1 = Pearson(test_df,j)\n",
    "\n",
    "            traindf = WanDuZi(train_df1,int(n))\n",
    "            testdf = WanDuZi(test_df1,int(n))\n",
    "\n",
    "            train_set2,test_set2=np.split(traindf,[int(len(traindf)*0.65)])  # 65% train / 35% test split (previous comment said 80/30/10, which did not match the code)\n",
    "            print(len(train_set2),len(test_set2))\n",
    "\n",
    "            sequence_length = 50\n",
    "\n",
    "            # pick the feature columns \n",
    "            sensor_cols = ['a' + str(k) for k in range(1,int(n)+1)]\n",
    "            sequence_cols = ['setting1', 'setting2', 'setting3', 'cycle_norm']\n",
    "            sequence_cols.extend(sensor_cols)\n",
    "            # generator for the sequences\n",
    "            seq_gen = (list(gen_sequence(train_set2[train_set2['id']==id], sequence_length, sequence_cols)) \n",
    "                       for id in train_set2['id'].unique())\n",
    "            # generate sequences and convert to numpy array\n",
    "            seq_array = np.concatenate(list(seq_gen)).astype(np.float32)\n",
    "            seq_array.shape\n",
    "            # generate labels\n",
    "            label_gen = [gen_labels(train_set2[train_set2['id']==id], sequence_length, ['label1']) \n",
    "                         for id in train_set2['id'].unique()]\n",
    "            label_array = np.concatenate(label_gen).astype(np.float32)\n",
    "            label_array.shape\n",
    "\n",
    "\n",
    "\n",
    "            # build the network\n",
    "            nb_features = seq_array.shape[2]\n",
    "            nb_out = label_array.shape[1]\n",
    "\n",
    "            model = Sequential()\n",
    "\n",
    "            model.add(LSTM(input_shape=(sequence_length, nb_features),\n",
    "                                                 units=120,\n",
    "                                                 return_sequences=True))\n",
    "\n",
    "            model.add(Dropout(0.25))#0.2-》0.25 0.956989\t0.920000\t0.92\t0.920000\n",
    "\n",
    "            model.add(LSTM(units=50,return_sequences=False))\n",
    "            model.add(Dropout(0.25))\n",
    "\n",
    "                        # TBD: tried in practice, did not help\n",
    "                        # model.add(Flatten())\n",
    "                        # model.add(LeakyReLU(alpha=0.2))\n",
    "\n",
    "\n",
    "\n",
    "            model.add(Dense(units=nb_out, activation='sigmoid'))\n",
    "            model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
    "\n",
    "            reduce1 = ReduceLROnPlateau(monitor='val_loss',\n",
    "                                                 factor=0.1,\n",
    "                                                 patience=2,\n",
    "                                                 verbose=0,\n",
    "                                                 mode='auto',\n",
    "                                                 min_delta=0,\n",
    "                                                 cooldown=0,\n",
    "                                                 min_lr=0)\n",
    "            model.fit(seq_array, label_array, epochs=10, batch_size=200, validation_split=0.05, verbose=1,\n",
    "                                  callbacks = [keras.callbacks.EarlyStopping(monitor='accuracy', min_delta=0, patience=0, verbose=0, mode='auto'),reduce1])\n",
    "\n",
    "            # training metrics\n",
    "            scores = model.evaluate(seq_array, label_array, verbose=1, batch_size=200)\n",
    "            print('Accuracy: {}'.format(scores[1]))  # training-set accuracy (typo 'Accurracy' fixed)\n",
    "            # make predictions and compute confusion matrix\n",
    "            y_pred = model.predict_classes(seq_array,verbose=1, batch_size=200)\n",
    "            y_true = label_array\n",
    "            print('Confusion matrix\\n- x-axis is true labels.\\n- y-axis is predicted labels')\n",
    "            cm = confusion_matrix(y_true, y_pred)\n",
    "            cm\n",
    "            # compute precision and recall\n",
    "            precision = precision_score(y_true, y_pred)\n",
    "            recall = recall_score(y_true, y_pred)\n",
    "            print( 'precision = ', precision, '\\n', 'recall = ', recall)\n",
    "            seq_array_test_last = [testdf[testdf['id']==id][sequence_cols].values[-sequence_length:] \n",
    "                                   for id in testdf['id'].unique() if len(testdf[testdf['id']==id]) >= sequence_length]\n",
    "\n",
    "            seq_array_test_last = np.asarray(seq_array_test_last).astype(np.float32)\n",
    "            seq_array_test_last.shape\n",
    "\n",
    "            y_mask = [len(testdf[testdf['id']==id]) >= sequence_length for id in testdf['id'].unique()]\n",
    "            label_array_test_last = testdf.groupby('id')['label1'].nth(-1)[y_mask].values\n",
    "            label_array_test_last = label_array_test_last.reshape(label_array_test_last.shape[0],1).astype(np.float32)\n",
    "            label_array_test_last.shape\n",
    "            # test metrics\n",
    "            scores_test = model.evaluate(seq_array_test_last, label_array_test_last, verbose=2)\n",
    "            print('Accuracy: {}'.format(scores_test[1]))  # test-set accuracy (typo 'Accurracy' fixed)\n",
    "            # make predictions and compute confusion matrix\n",
    "            y_pred_test = model.predict_classes(seq_array_test_last)\n",
    "            y_true_test = label_array_test_last\n",
    "            print('Confusion matrix\\n- x-axis is true labels.\\n- y-axis is predicted labels')\n",
    "            cm = confusion_matrix(y_true_test, y_pred_test)\n",
    "            cm\n",
    "\n",
    "            # compute precision and recall\n",
    "            precision_test = precision_score(y_true_test, y_pred_test)\n",
    "            recall_test = recall_score(y_true_test, y_pred_test)\n",
    "            f1_test = 2 * (precision_test * recall_test) / (precision_test + recall_test)\n",
    "            print( 'Precision: ', precision_test, '\\n', 'Recall: ', recall_test,'\\n', 'F1-score:', f1_test )\n",
    "\n",
    "            results_df = pd.DataFrame([[scores_test[1],precision_test,recall_test,f1_test],\n",
    "                                      [0.94, 0.952381, 0.8, 0.869565]],\n",
    "                                     columns = ['Accuracy', 'Precision', 'Recall', 'F1-score'],\n",
    "                                     index = ['LSTM',\n",
    "                                             'Template Best Model'])\n",
    "            results_df\n",
    "\n",
    "\n",
    "            if scores_test[1] > best_score and precision_test>best_precision:  # keep the best configuration; requires BOTH accuracy and precision to improve\n",
    "                best_score = scores_test[1]\n",
    "                best_precision = precision_test\n",
    "                best_div = pp\n",
    "                best_parameters = {'j':j,'n':n,'pp':pp}\n",
    "                \n",
    "                        \n",
    "\n",
    "# Report the BEST hyperparameters found by the search, not the last loop\n",
    "# iteration's values of j/n/pp (which is what was printed before).\n",
    "# Falls back to the final loop values if no configuration ever improved\n",
    "# on the baseline (i.e. best_parameters was never assigned).\n",
    "bp = globals().get('best_parameters', {'j': j, 'n': n, 'pp': pp})\n",
    "print(\"j:{}\".format(bp.get('j', j)))\n",
    "print(\"n:{}\".format(bp.get('n', n)))\n",
    "print(\"pp:{}\".format(bp.get('pp', pp)))\n",
    "print(\"accuracy:{}\".format(best_score))\n",
    "print(\"precision:{}\".format(best_precision))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.12"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
