{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Best: 0.791597 using {'learn_rate': 0.001}\n"
     ]
    }
   ],
   "source": [
    "%matplotlib inline\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "import numpy as np \n",
    "import pandas as pd \n",
    "import random\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "from keras.models import Sequential\n",
    "from keras.layers.core import Dense, Dropout, Activation, Flatten\n",
    "from keras.layers.convolutional import Conv1D\n",
    "from keras.layers import AveragePooling1D\n",
    "from keras.utils import np_utils\n",
    "from keras.optimizers import Adam\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import OneHotEncoder\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from contextlib import redirect_stdout\n",
    "from keras.callbacks import EarlyStopping, ModelCheckpoint\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from keras.wrappers.scikit_learn import KerasClassifier\n",
    "\n",
    "def get_data():\n",
    "    \"\"\"Load the p2p feature matrix and labels from disk.\n",
    "\n",
    "    Returns:\n",
    "        (data, label): two DataFrames. `data` has all-NaN columns removed\n",
    "        (an artifact of trailing separators with sep=\" \") and its columns\n",
    "        renumbered 0..n-1.\n",
    "    \"\"\"\n",
    "    data = pd.read_csv(\"/kaggle/input/train-cnn/p2pData.txt\", sep=\" \", header=None)\n",
    "    label = pd.read_csv(\"/kaggle/input/train-cnn/p2pLabel.txt\", sep=\" \", header=None)\n",
    "\n",
    "    # Drop columns that are entirely NaN. The original loop only tested the\n",
    "    # first row of each column (`data[i][0] != data[i][0]`), which would both\n",
    "    # miss NaNs further down and wrongly delete a column whose first entry\n",
    "    # alone happened to be NaN.\n",
    "    data = data.dropna(axis=1, how=\"all\")\n",
    "    data.columns = np.arange(0, data.shape[1])\n",
    "\n",
    "    return data, label\n",
    "\n",
    "\n",
    "def generateSampleList(number):\n",
    "    n_samples = number\n",
    "    sample_list = []\n",
    "    for i in range(0, 6000):\n",
    "        s = random.randint(0, n_samples)\n",
    "        if s not in sample_list:\n",
    "            sample_list.append(s)\n",
    "\n",
    "    return sample_list\n",
    "\n",
    "\n",
    "# ---- Data preparation ----\n",
    "data, label = get_data()\n",
    "\n",
    "# Keep only the hand-picked feature columns, then renumber them 0..3.\n",
    "selectedFeatureList = [0, 1, 61, 62]\n",
    "data = data[selectedFeatureList]\n",
    "data.columns = np.arange(0, data.shape[1])\n",
    "\n",
    "# Random subsample of rows (at most 6000 distinct indices). The original\n",
    "# built this list twice; the first result was never used, so only one\n",
    "# call is kept here.\n",
    "sampleList = generateSampleList(data.shape[0])\n",
    "data = data.loc[sampleList]\n",
    "label = label.loc[sampleList]\n",
    "\n",
    "# Standardize features to zero mean / unit variance.\n",
    "# fit_transform already returns an ndarray, so the original extra\n",
    "# np.array(data) conversion is dropped.\n",
    "scaler = StandardScaler()\n",
    "data = scaler.fit_transform(data)\n",
    "label = np.array(label)\n",
    "\n",
    "# 80/20 train/test split (unseeded, so the split varies between runs).\n",
    "X_train, X_test, y_train, y_test = train_test_split(data, label, test_size=0.2)\n",
    "n_samples, n_features = X_train.shape\n",
    "\n",
    "\n",
    "def reshapeData(data):\n",
    "    n_samples, n_features = data.shape\n",
    "    data=np.reshape(data,(n_samples,n_features,1))\n",
    "    return data\n",
    "\n",
    "# Add the trailing channel axis expected by the Conv1D input layer.\n",
    "X_train=reshapeData(X_train)\n",
    "X_test=reshapeData(X_test)\n",
    "\n",
    "def toOneHot(label):\n",
    "    \"\"\"One-hot encode a (n_samples, 1) label column as a dense array.\n",
    "\n",
    "    Args:\n",
    "        label: 2-D array-like of integer class labels.\n",
    "\n",
    "    Returns:\n",
    "        Dense ndarray of shape (n_samples, n_classes).\n",
    "    \"\"\"\n",
    "    # Re-indented with 4 spaces: the original used tabs, inconsistent\n",
    "    # with the rest of the file. Behavior is unchanged.\n",
    "    enc = OneHotEncoder()\n",
    "    enc.fit(label)\n",
    "    return enc.transform(label).toarray()\n",
    "\n",
    "# One-hot encode the integer labels for categorical_crossentropy.\n",
    "y_train = toOneHot(y_train)\n",
    "y_test = toOneHot(y_test)\n",
    "\n",
    "def create_model(learn_rate=0.1):\n",
    "    \"\"\"Build and compile the 1-D CNN evaluated by the grid search.\n",
    "\n",
    "    Args:\n",
    "        learn_rate: Adam learning rate; this is the hyperparameter tuned\n",
    "            by GridSearchCV below.\n",
    "\n",
    "    Returns:\n",
    "        A compiled Sequential model with a 5-way softmax output.\n",
    "\n",
    "    Relies on the module-level ``n_features`` computed after the\n",
    "    train/test split above.\n",
    "    \"\"\"\n",
    "    \n",
    "    adam = Adam(lr=learn_rate,\n",
    "                beta_1=0.9,\n",
    "                beta_2=0.999,\n",
    "                epsilon=1e-08,\n",
    "                amsgrad=True)\n",
    "    \n",
    "    model = Sequential()\n",
    "    \n",
    "    # Conv block 1: two strided (stride=2) convolutions, 64 filters each.\n",
    "    model.add(\n",
    "        Conv1D(64,\n",
    "               kernel_size=7,\n",
    "               strides=2,\n",
    "               input_shape=(n_features, 1),\n",
    "               kernel_initializer='uniform',\n",
    "               padding='same'))\n",
    "    model.add(Activation('relu'))\n",
    "    model.add(Conv1D(64, kernel_size=7, strides=2, padding='same'))\n",
    "    model.add(Activation('relu'))\n",
    "    # NOTE(review): pool_size=1 makes this pooling layer a no-op -- confirm intended.\n",
    "    model.add(AveragePooling1D(pool_size=1))\n",
    "    model.add(Dropout(0.2))\n",
    "\n",
    "    # Conv block 2: same pattern with 128 filters.\n",
    "    model.add(Conv1D(128, kernel_size=7, strides=2, padding='same'))\n",
    "    model.add(Activation('relu'))\n",
    "    model.add(Conv1D(128, kernel_size=7, strides=2, padding='same'))\n",
    "    model.add(Activation('relu'))\n",
    "    model.add(AveragePooling1D(pool_size=1))\n",
    "    model.add(Dropout(0.2))\n",
    "\n",
    "    # Classifier head.\n",
    "    # NOTE(review): there is no activation between the Dense(512) layers,\n",
    "    # so they compose linearly -- confirm this is intended.\n",
    "    model.add(Flatten())\n",
    "    model.add(Dense(512))\n",
    "    model.add(Dense(512))\n",
    "    model.add(Dense(5))\n",
    "    model.add(Activation('softmax'))\n",
    "    model.compile(loss='categorical_crossentropy',\n",
    "                  optimizer=adam,\n",
    "                  metrics=['accuracy'])\n",
    "    return model\n",
    "\n",
    "\n",
    "\n",
    "#with open('Model_Architecture.txt', 'w') as f:\n",
    "#    with redirect_stdout(f):\n",
    "#        model.summary()\n",
    "\n",
    "        \n",
    "# Wrap the Keras builder so scikit-learn's GridSearchCV can drive it.\n",
    "model = KerasClassifier(build_fn=create_model, epochs=160, batch_size=120, verbose=0)\n",
    "\n",
    "\n",
    "#callbacks = [\n",
    "#    EarlyStopping(monitor='val_acc',\n",
    "#                  min_delta=0.00001,\n",
    "#                  patience=6,\n",
    "#                  verbose=0,\n",
    "#                  mode='max',\n",
    "#                  restore_best_weights=False),\n",
    "#    ModelCheckpoint(filepath='./best_model.h5',\n",
    "#                    monitor='val_acc',\n",
    "#                    mode=\"max\",\n",
    "#                    save_weights_only=False,\n",
    "#                    save_best_only=True)\n",
    "#]\n",
    "\n",
    "# Grid-search the Adam learning rate with 3-fold cross-validation and\n",
    "# report the best mean CV accuracy and the rate that achieved it.\n",
    "learn_rate = [0.001, 0.01, 0.05, 0.07, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\n",
    "param_grid = dict(learn_rate=learn_rate)\n",
    "grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=3)\n",
    "grid_result = grid.fit(X_train, y_train)\n",
    "print(\"Best: %f using %s\" %\n",
    "      (grid_result.best_score_, grid_result.best_params_))\n",
    "\n",
    "\n",
    "# NOTE(review): the commented-out blocks below are the single-model\n",
    "# training / evaluation / plotting path, superseded by the grid search\n",
    "# above; consider deleting them or moving them to a separate cell.\n",
    "#history = model.fit(X_train,\n",
    "#                    y_train,\n",
    "#                    batch_size=400,\n",
    "#                    epochs=150,\n",
    "#                    verbose=0,\n",
    "#                    validation_split=0.2)\n",
    "\n",
    "#model.save(\"./best_model.h5\")\n",
    "\n",
    "#Evaluating the model on the test data    \n",
    "#score, accuracy = model.evaluate(X_test, y_test, verbose=0)\n",
    "#print('Test score:', score)\n",
    "#print('Test accuracy:', accuracy)\n",
    "\n",
    "#plt.plot(history.history['loss'], label='train')\n",
    "#plt.plot(history.history['val_loss'], label='val')\n",
    "#plt.xlabel(\"epoch\")\n",
    "#plt.title(\"Loss\")\n",
    "#plt.legend(loc=\"best\")\n",
    "#plt.savefig(\"./loss.png\")\n",
    "#plt.show()\n",
    "\n",
    "#plt.plot(history.history['acc'], label='train')\n",
    "#plt.plot(history.history['val_acc'], label='val')\n",
    "#plt.xlabel(\"epoch\")\n",
    "#plt.title(\"Accuracy\")\n",
    "#plt.legend(loc=\"best\")\n",
    "#plt.savefig(\"./accuracy.png\")\n",
    "#plt.show()"
   ]
  }
 ],
 "metadata": {
  "hide_input": false,
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
