{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[5]\n",
      "[[ 0.  0.  0.  0.  0.  1.  0.  0.  0.  0.]]\n",
      "Training ------------\n",
      "Epoch 1/5\n",
      "60000/60000 [==============================] - 799s 13ms/step - loss: 0.1814 - acc: 0.9459\n",
      "Epoch 2/5\n",
      "60000/60000 [==============================] - 797s 13ms/step - loss: 0.0582 - acc: 0.9821\n",
      "Epoch 3/5\n",
      "60000/60000 [==============================] - 796s 13ms/step - loss: 0.0386 - acc: 0.9881\n",
      "Epoch 4/5\n",
      "60000/60000 [==============================] - 798s 13ms/step - loss: 0.0283 - acc: 0.9913\n",
      "Epoch 5/5\n",
      "60000/60000 [==============================] - 796s 13ms/step - loss: 0.0214 - acc: 0.9926\n",
      "\n",
      "Testing ------------\n",
      "10000/10000 [==============================] - 56s 6ms/step\n",
      "('\\ntest loss: ', 0.027207671511101945)\n",
      "('\\ntest accuracy: ', 0.99129999999999996)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python2.7/dist-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n",
      "  \"This module will be removed in 0.20.\", DeprecationWarning)\n",
      "/usr/local/lib/python2.7/dist-packages/sklearn/grid_search.py:42: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. This module will be removed in 0.20.\n",
      "  DeprecationWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0]\ttrain-merror:0.013917\n",
      "[1]\ttrain-merror:0.009367\n",
      "[2]\ttrain-merror:0.008083\n",
      "[3]\ttrain-merror:0.006783\n",
      "[4]\ttrain-merror:0.0062\n",
      "[5]\ttrain-merror:0.005683\n",
      "[6]\ttrain-merror:0.005283\n",
      "[7]\ttrain-merror:0.004917\n",
      "[8]\ttrain-merror:0.004583\n",
      "[9]\ttrain-merror:0.0044\n",
      "[10]\ttrain-merror:0.00405\n",
      "[11]\ttrain-merror:0.003733\n",
      "[12]\ttrain-merror:0.003317\n",
      "[13]\ttrain-merror:0.003117\n",
      "[14]\ttrain-merror:0.00295\n",
      "[15]\ttrain-merror:0.0028\n",
      "[16]\ttrain-merror:0.00265\n",
      "[17]\ttrain-merror:0.00255\n",
      "[18]\ttrain-merror:0.002333\n",
      "[19]\ttrain-merror:0.002217\n",
      "[20]\ttrain-merror:0.002067\n",
      "[21]\ttrain-merror:0.001867\n",
      "[22]\ttrain-merror:0.001767\n",
      "[23]\ttrain-merror:0.0017\n",
      "[24]\ttrain-merror:0.00155\n",
      "[25]\ttrain-merror:0.001433\n",
      "[26]\ttrain-merror:0.001217\n",
      "[27]\ttrain-merror:0.001083\n",
      "[28]\ttrain-merror:0.0009\n",
      "[29]\ttrain-merror:0.0008\n",
      "[30]\ttrain-merror:0.0007\n",
      "[31]\ttrain-merror:0.000633\n",
      "[32]\ttrain-merror:0.000583\n",
      "[33]\ttrain-merror:0.0005\n",
      "[34]\ttrain-merror:0.000433\n",
      "[35]\ttrain-merror:0.0004\n",
      "[36]\ttrain-merror:0.000383\n",
      "[37]\ttrain-merror:0.00035\n",
      "[38]\ttrain-merror:0.00035\n",
      "[39]\ttrain-merror:0.000333\n",
      "[40]\ttrain-merror:0.0003\n",
      "[41]\ttrain-merror:0.000233\n",
      "[42]\ttrain-merror:0.000233\n",
      "[43]\ttrain-merror:0.0002\n",
      "[44]\ttrain-merror:0.000167\n",
      "[45]\ttrain-merror:0.000167\n",
      "[46]\ttrain-merror:0.000133\n",
      "[47]\ttrain-merror:0.000117\n",
      "[48]\ttrain-merror:0.000117\n",
      "[49]\ttrain-merror:0.000117\n",
      "[50]\ttrain-merror:0.0001\n",
      "[51]\ttrain-merror:8.3e-05\n",
      "[52]\ttrain-merror:8.3e-05\n",
      "[53]\ttrain-merror:8.3e-05\n",
      "[54]\ttrain-merror:8.3e-05\n",
      "[55]\ttrain-merror:6.7e-05\n",
      "[56]\ttrain-merror:6.7e-05\n",
      "[57]\ttrain-merror:5e-05\n",
      "[58]\ttrain-merror:3.3e-05\n",
      "[59]\ttrain-merror:3.3e-05\n",
      "[60]\ttrain-merror:3.3e-05\n",
      "[61]\ttrain-merror:3.3e-05\n",
      "[62]\ttrain-merror:3.3e-05\n",
      "[63]\ttrain-merror:1.7e-05\n",
      "[64]\ttrain-merror:1.7e-05\n",
      "[65]\ttrain-merror:1.7e-05\n",
      "[66]\ttrain-merror:1.7e-05\n",
      "[67]\ttrain-merror:1.7e-05\n",
      "[68]\ttrain-merror:1.7e-05\n",
      "[69]\ttrain-merror:1.7e-05\n",
      "[70]\ttrain-merror:1.7e-05\n",
      "[71]\ttrain-merror:1.7e-05\n",
      "[72]\ttrain-merror:1.7e-05\n",
      "[73]\ttrain-merror:1.7e-05\n",
      "[74]\ttrain-merror:1.7e-05\n",
      "[75]\ttrain-merror:1.7e-05\n",
      "[76]\ttrain-merror:1.7e-05\n",
      "[77]\ttrain-merror:1.7e-05\n",
      "[78]\ttrain-merror:1.7e-05\n",
      "[79]\ttrain-merror:1.7e-05\n",
      "[80]\ttrain-merror:1.7e-05\n",
      "[81]\ttrain-merror:1.7e-05\n",
      "[82]\ttrain-merror:1.7e-05\n",
      "[83]\ttrain-merror:1.7e-05\n",
      "[84]\ttrain-merror:1.7e-05\n",
      "[85]\ttrain-merror:1.7e-05\n",
      "[86]\ttrain-merror:1.7e-05\n",
      "[87]\ttrain-merror:1.7e-05\n",
      "[88]\ttrain-merror:1.7e-05\n",
      "[89]\ttrain-merror:1.7e-05\n",
      "[90]\ttrain-merror:1.7e-05\n",
      "[91]\ttrain-merror:1.7e-05\n",
      "[92]\ttrain-merror:1.7e-05\n",
      "[93]\ttrain-merror:1.7e-05\n",
      "[94]\ttrain-merror:1.7e-05\n",
      "[95]\ttrain-merror:1.7e-05\n",
      "[96]\ttrain-merror:1.7e-05\n",
      "[97]\ttrain-merror:1.7e-05\n",
      "[98]\ttrain-merror:0\n",
      "[99]\ttrain-merror:0\n",
      "[100]\ttrain-merror:0\n",
      "[101]\ttrain-merror:0\n",
      "[102]\ttrain-merror:0\n",
      "[103]\ttrain-merror:0\n",
      "[104]\ttrain-merror:0\n",
      "[105]\ttrain-merror:0\n",
      "[106]\ttrain-merror:0\n",
      "[107]\ttrain-merror:0\n",
      "[108]\ttrain-merror:0\n",
      "[109]\ttrain-merror:0\n",
      "[110]\ttrain-merror:0\n",
      "[111]\ttrain-merror:0\n",
      "[112]\ttrain-merror:0\n",
      "[113]\ttrain-merror:0\n",
      "[114]\ttrain-merror:0\n",
      "[115]\ttrain-merror:0\n",
      "[116]\ttrain-merror:0\n",
      "[117]\ttrain-merror:0\n",
      "[118]\ttrain-merror:0\n",
      "[119]\ttrain-merror:0\n",
      "[120]\ttrain-merror:0\n",
      "[121]\ttrain-merror:0\n",
      "[122]\ttrain-merror:0\n",
      "[123]\ttrain-merror:0\n",
      "[124]\ttrain-merror:0\n",
      "[125]\ttrain-merror:0\n",
      "[126]\ttrain-merror:0\n",
      "[127]\ttrain-merror:0\n",
      "[128]\ttrain-merror:0\n",
      "[129]\ttrain-merror:0\n",
      "[130]\ttrain-merror:0\n",
      "[131]\ttrain-merror:0\n",
      "[132]\ttrain-merror:0\n",
      "[133]\ttrain-merror:0\n",
      "[134]\ttrain-merror:0\n",
      "[135]\ttrain-merror:0\n",
      "[136]\ttrain-merror:0\n",
      "[137]\ttrain-merror:0\n",
      "[138]\ttrain-merror:0\n",
      "[139]\ttrain-merror:0\n",
      "[140]\ttrain-merror:0\n",
      "[141]\ttrain-merror:0\n",
      "[142]\ttrain-merror:0\n",
      "[143]\ttrain-merror:0\n",
      "[144]\ttrain-merror:0\n",
      "[145]\ttrain-merror:0\n",
      "[146]\ttrain-merror:0\n",
      "[147]\ttrain-merror:0\n",
      "[148]\ttrain-merror:0\n",
      "[149]\ttrain-merror:0\n",
      "             precision    recall  f1-score   support\n",
      "\n",
      "          0       0.99      0.99      0.99       980\n",
      "          1       1.00      1.00      1.00      1135\n",
      "          2       0.99      0.99      0.99      1032\n",
      "          3       0.99      1.00      0.99      1010\n",
      "          4       0.99      0.99      0.99       982\n",
      "          5       0.99      0.99      0.99       892\n",
      "          6       1.00      0.99      0.99       958\n",
      "          7       0.99      0.99      0.99      1028\n",
      "          8       0.99      0.99      0.99       974\n",
      "          9       0.99      0.99      0.99      1009\n",
      "\n",
      "avg / total       0.99      0.99      0.99     10000\n",
      "\n",
      "[[ 975    0    0    0    0    1    1    1    2    0]\n",
      " [   0 1130    2    1    0    0    1    1    0    0]\n",
      " [   2    0 1023    0    1    0    0    4    2    0]\n",
      " [   0    0    0 1005    0    3    0    1    1    0]\n",
      " [   0    0    0    0  977    0    1    0    1    3]\n",
      " [   2    0    0    7    0  881    1    0    0    1]\n",
      " [   4    2    1    0    1    4  945    0    1    0]\n",
      " [   0    1    4    1    0    0    0 1020    1    1]\n",
      " [   3    0    0    1    0    1    0    2  964    3]\n",
      " [   0    1    0    1    3    3    0    1    0 1000]]\n"
     ]
    }
   ],
   "source": [
    "# CNN feature extractor + XGBoost classifier on MNIST.\n",
    "# 1) Train a small CNN end-to-end on MNIST.\n",
    "# 2) Reuse the penultimate 1024-d dense layer as a feature extractor.\n",
    "# 3) Train an XGBoost multiclass model on those features and report metrics.\n",
    "import numpy as np\n",
    "np.random.seed(1337)  # for reproducibility\n",
    "from keras.datasets import mnist\n",
    "from keras.utils import np_utils\n",
    "from keras.models import Model\n",
    "from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten, Input, Dropout\n",
    "from keras.optimizers import Adam\n",
    "\n",
    "# download the mnist to the path '~/.keras/datasets/' if it is the first time to be called\n",
    "# X shape (60,000 28x28), y shape (10,000, )\n",
    "(X_train, y_train), (X_test, y_test) = mnist.load_data('mnist.npz')\n",
    "print(y_train[:1])\n",
    "# data pre-processing: channels-first images scaled to [0, 1], one-hot labels\n",
    "X_train = X_train.reshape(-1, 1, 28, 28) / 255.\n",
    "X_test = X_test.reshape(-1, 1, 28, 28) / 255.\n",
    "Y_train = np_utils.to_categorical(y_train, num_classes=10)\n",
    "Y_test = np_utils.to_categorical(y_test, num_classes=10)\n",
    "print(Y_train[:1])\n",
    "\n",
    "# Functional-API CNN: two conv/pool stages -> 1024-d dense -> softmax\n",
    "inputs = Input(shape=(1, 28, 28))\n",
    "\n",
    "conv2d1 = Convolution2D(filters=64, kernel_size=5, padding='same', data_format='channels_first')(inputs)\n",
    "activation1 = Activation('relu')(conv2d1)\n",
    "maxpooling1 = MaxPooling2D(pool_size=2, strides=2, padding='same', data_format='channels_first')(activation1)\n",
    "dropout1 = Dropout(0.25)(maxpooling1)\n",
    "\n",
    "conv2d2 = Convolution2D(filters=64, kernel_size=4, padding='same', data_format='channels_first')(dropout1)\n",
    "activation2 = Activation('relu')(conv2d2)\n",
    "maxpooling2 = MaxPooling2D(pool_size=2, strides=2, padding='same', data_format='channels_first')(activation2)\n",
    "\n",
    "flatten = Flatten()(maxpooling2)\n",
    "\n",
    "dense1 = Dense(1024)(flatten)\n",
    "activation3 = Activation('relu')(dense1)\n",
    "\n",
    "dense2 = Dense(10)(activation3)\n",
    "activation4 = Activation('softmax')(dense2)\n",
    "\n",
    "# Two views of the same graph: the full classifier, and the 1024-d features\n",
    "model_dense2_output = Model(inputs=inputs, outputs=activation3)\n",
    "model = Model(inputs=inputs, outputs=activation4)\n",
    "\n",
    "adam = Adam(lr=1e-4)\n",
    "\n",
    "# We add metrics to get more results you want to see\n",
    "model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\n",
    "\n",
    "print('Training ------------')\n",
    "model.fit(X_train, Y_train, epochs=5, batch_size=16)\n",
    "\n",
    "print('\\nTesting ------------')\n",
    "# Evaluate the model with the metrics we defined earlier\n",
    "loss, accuracy = model.evaluate(X_test, Y_test)\n",
    "\n",
    "print('\\ntest loss: ', loss)\n",
    "print('\\ntest accuracy: ', accuracy)\n",
    "\n",
    "# --- XGBoost on the CNN features ---\n",
    "import xgboost\n",
    "import pandas as pd\n",
    "from sklearn import metrics\n",
    "# sklearn.grid_search was deprecated in 0.18 and removed in 0.20;\n",
    "# the replacement lives in sklearn.model_selection\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "\n",
    "X_train_xg = model_dense2_output.predict(X_train)\n",
    "X_test_xg = model_dense2_output.predict(X_test)\n",
    "\n",
    "# Persist features + label through CSV. to_csv writes the integer column\n",
    "# names as strings, so after read_csv the label column is named '1024'.\n",
    "z = np.concatenate([np.array(X_train_xg).reshape(60000, 1024), np.array(y_train).reshape(60000, 1)], axis=1)\n",
    "z = pd.DataFrame(z)\n",
    "z.to_csv('train_xg.csv', index=False)\n",
    "\n",
    "z = np.concatenate([np.array(X_test_xg).reshape(10000, 1024), np.array(y_test).reshape(10000, 1)], axis=1)\n",
    "z = pd.DataFrame(z)\n",
    "z.to_csv('test_xg.csv', index=False)\n",
    "\n",
    "train = pd.read_csv('train_xg.csv')\n",
    "train_y = train['1024'].astype('int')\n",
    "train_x = train.drop(['1024'], axis=1)\n",
    "dataset = xgboost.DMatrix(train_x, label=train_y)\n",
    "watchlist = [(dataset, 'train')]\n",
    "params = {'max_depth': 7, 'eta': 0.1, 'silent': 1, 'num_class': 10, 'objective': 'multi:softmax'}\n",
    "model_xg = xgboost.train(params, dataset, num_boost_round=150, evals=watchlist)\n",
    "\n",
    "test = pd.read_csv('test_xg.csv')\n",
    "test_y = test['1024'].astype('int')\n",
    "test_x = test.drop(['1024'], axis=1)\n",
    "\n",
    "test_x = xgboost.DMatrix(test_x)\n",
    "# multi:softmax returns the predicted class index directly\n",
    "result = model_xg.predict(test_x)\n",
    "\n",
    "print(metrics.classification_report(test_y, result))\n",
    "print(metrics.confusion_matrix(test_y, result))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python2.7/dist-packages/ipykernel_launcher.py:38: UserWarning: The `input_dim` and `input_length` arguments in recurrent layers are deprecated. Use `input_shape` instead.\n",
      "/usr/local/lib/python2.7/dist-packages/ipykernel_launcher.py:38: UserWarning: Update your `LSTM` call to the Keras 2 API: `LSTM(units=50, input_shape=(28, 28))`\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "input_3 (InputLayer)         (None, 28, 28)            0         \n",
      "_________________________________________________________________\n",
      "lstm_1 (LSTM)                (None, 50)                15800     \n",
      "_________________________________________________________________\n",
      "dense_3 (Dense)              (None, 10)                510       \n",
      "=================================================================\n",
      "Total params: 16,310\n",
      "Trainable params: 16,310\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "None\n",
      "Training ------------\n",
      "Epoch 1/5\n",
      "60000/60000 [==============================] - 71s 1ms/step - loss: 0.3741 - acc: 0.8834\n",
      "Epoch 2/5\n",
      "60000/60000 [==============================] - 70s 1ms/step - loss: 0.1343 - acc: 0.9595\n",
      "Epoch 3/5\n",
      "60000/60000 [==============================] - 70s 1ms/step - loss: 0.0922 - acc: 0.9718\n",
      "Epoch 4/5\n",
      "60000/60000 [==============================] - 70s 1ms/step - loss: 0.0727 - acc: 0.9782\n",
      "Epoch 5/5\n",
      "60000/60000 [==============================] - 72s 1ms/step - loss: 0.0598 - acc: 0.9821\n",
      "\n",
      "Testing ------------\n",
      "10000/10000 [==============================] - 2s 185us/step\n",
      "('\\ntest loss: ', 0.061161279658973215)\n",
      "('\\ntest accuracy: ', 0.98260000000000003)\n",
      "             precision    recall  f1-score   support\n",
      "\n",
      "          0       0.99      0.99      0.99       980\n",
      "          1       0.99      1.00      0.99      1135\n",
      "          2       0.98      0.98      0.98      1032\n",
      "          3       0.98      0.98      0.98      1010\n",
      "          4       0.99      0.97      0.98       982\n",
      "          5       0.98      0.99      0.98       892\n",
      "          6       0.99      0.98      0.98       958\n",
      "          7       0.97      0.99      0.98      1028\n",
      "          8       0.99      0.98      0.98       974\n",
      "          9       0.98      0.97      0.98      1009\n",
      "\n",
      "avg / total       0.98      0.98      0.98     10000\n",
      "\n",
      "[[ 972    0    1    0    2    2    2    1    0    0]\n",
      " [   0 1130    3    0    0    1    0    0    1    0]\n",
      " [   1    3 1015    3    2    0    0    8    0    0]\n",
      " [   0    1    4  990    0    3    0    6    1    5]\n",
      " [   0    1    3    0  956    0    4    6    1   11]\n",
      " [   0    0    0    8    0  881    1    1    0    1]\n",
      " [   5    2    1    0    3    7  935    0    4    1]\n",
      " [   0    3    7    2    0    0    0 1013    0    3]\n",
      " [   1    2    2    5    1    4    1    4  952    2]\n",
      " [   2    0    0    3    6    2    1    9    4  982]]\n",
      "[0]\ttrain-merror:0.017917\n",
      "[1]\ttrain-merror:0.015867\n",
      "[2]\ttrain-merror:0.014217\n",
      "[3]\ttrain-merror:0.012717\n",
      "[4]\ttrain-merror:0.011633\n",
      "[5]\ttrain-merror:0.011033\n",
      "[6]\ttrain-merror:0.010533\n",
      "[7]\ttrain-merror:0.009967\n",
      "[8]\ttrain-merror:0.009667\n",
      "[9]\ttrain-merror:0.009267\n",
      "[10]\ttrain-merror:0.008917\n",
      "[11]\ttrain-merror:0.008533\n",
      "[12]\ttrain-merror:0.008\n",
      "[13]\ttrain-merror:0.007783\n",
      "[14]\ttrain-merror:0.007517\n",
      "[15]\ttrain-merror:0.007067\n",
      "[16]\ttrain-merror:0.006867\n",
      "[17]\ttrain-merror:0.0066\n",
      "[18]\ttrain-merror:0.006417\n",
      "[19]\ttrain-merror:0.005967\n",
      "[20]\ttrain-merror:0.00565\n",
      "[21]\ttrain-merror:0.005483\n",
      "[22]\ttrain-merror:0.005217\n",
      "[23]\ttrain-merror:0.00505\n",
      "[24]\ttrain-merror:0.004767\n",
      "[25]\ttrain-merror:0.004433\n",
      "[26]\ttrain-merror:0.004117\n",
      "[27]\ttrain-merror:0.003933\n",
      "[28]\ttrain-merror:0.003767\n",
      "[29]\ttrain-merror:0.00355\n",
      "[30]\ttrain-merror:0.003433\n",
      "[31]\ttrain-merror:0.00325\n",
      "[32]\ttrain-merror:0.00315\n",
      "[33]\ttrain-merror:0.002983\n",
      "[34]\ttrain-merror:0.002917\n",
      "[35]\ttrain-merror:0.0027\n",
      "[36]\ttrain-merror:0.00265\n",
      "[37]\ttrain-merror:0.0025\n",
      "[38]\ttrain-merror:0.00235\n",
      "[39]\ttrain-merror:0.0023\n",
      "[40]\ttrain-merror:0.002217\n",
      "[41]\ttrain-merror:0.002133\n",
      "[42]\ttrain-merror:0.002\n",
      "[43]\ttrain-merror:0.00185\n",
      "[44]\ttrain-merror:0.001767\n",
      "[45]\ttrain-merror:0.0017\n",
      "[46]\ttrain-merror:0.001633\n",
      "[47]\ttrain-merror:0.001517\n",
      "[48]\ttrain-merror:0.00145\n",
      "[49]\ttrain-merror:0.001333\n",
      "[50]\ttrain-merror:0.001283\n",
      "[51]\ttrain-merror:0.001133\n",
      "[52]\ttrain-merror:0.0011\n",
      "[53]\ttrain-merror:0.001017\n",
      "[54]\ttrain-merror:0.000883\n",
      "[55]\ttrain-merror:0.00085\n",
      "[56]\ttrain-merror:0.000783\n",
      "[57]\ttrain-merror:0.000733\n",
      "[58]\ttrain-merror:0.0007\n",
      "[59]\ttrain-merror:0.00065\n",
      "[60]\ttrain-merror:0.000633\n",
      "[61]\ttrain-merror:0.000633\n",
      "[62]\ttrain-merror:0.000567\n",
      "[63]\ttrain-merror:0.00055\n",
      "[64]\ttrain-merror:0.00055\n",
      "[65]\ttrain-merror:0.00055\n",
      "[66]\ttrain-merror:0.00055\n",
      "[67]\ttrain-merror:0.000517\n",
      "[68]\ttrain-merror:0.000467\n",
      "[69]\ttrain-merror:0.000467\n",
      "[70]\ttrain-merror:0.000433\n",
      "[71]\ttrain-merror:0.000417\n",
      "[72]\ttrain-merror:0.0004\n",
      "[73]\ttrain-merror:0.000367\n",
      "[74]\ttrain-merror:0.00035\n",
      "[75]\ttrain-merror:0.000317\n",
      "[76]\ttrain-merror:0.000317\n",
      "[77]\ttrain-merror:0.00025\n",
      "[78]\ttrain-merror:0.000233\n",
      "[79]\ttrain-merror:0.000233\n",
      "[80]\ttrain-merror:0.000233\n",
      "[81]\ttrain-merror:0.0002\n",
      "[82]\ttrain-merror:0.000183\n",
      "[83]\ttrain-merror:0.000183\n",
      "[84]\ttrain-merror:0.000167\n",
      "[85]\ttrain-merror:0.00015\n",
      "[86]\ttrain-merror:0.00015\n",
      "[87]\ttrain-merror:0.000133\n",
      "[88]\ttrain-merror:0.000117\n",
      "[89]\ttrain-merror:0.0001\n",
      "[90]\ttrain-merror:0.0001\n",
      "[91]\ttrain-merror:0.0001\n",
      "[92]\ttrain-merror:8.3e-05\n",
      "[93]\ttrain-merror:6.7e-05\n",
      "[94]\ttrain-merror:5e-05\n",
      "[95]\ttrain-merror:5e-05\n",
      "[96]\ttrain-merror:5e-05\n",
      "[97]\ttrain-merror:5e-05\n",
      "[98]\ttrain-merror:5e-05\n",
      "[99]\ttrain-merror:3.3e-05\n",
      "             precision    recall  f1-score   support\n",
      "\n",
      "          0       0.99      0.99      0.99       980\n",
      "          1       0.99      0.99      0.99      1135\n",
      "          2       0.98      0.99      0.99      1032\n",
      "          3       0.98      0.99      0.98      1010\n",
      "          4       0.99      0.97      0.98       982\n",
      "          5       0.98      0.98      0.98       892\n",
      "          6       0.99      0.98      0.98       958\n",
      "          7       0.98      0.98      0.98      1028\n",
      "          8       0.98      0.99      0.98       974\n",
      "          9       0.98      0.97      0.97      1009\n",
      "\n",
      "avg / total       0.98      0.98      0.98     10000\n",
      "\n",
      "[[ 972    0    1    0    1    2    3    1    0    0]\n",
      " [   0 1129    2    1    0    0    1    0    2    0]\n",
      " [   1    0 1024    0    0    0    1    4    2    0]\n",
      " [   0    0    6  996    0    2    0    2    1    3]\n",
      " [   0    0    1    0  957    0    7    2    3   12]\n",
      " [   0    0    0    9    0  877    2    0    1    3]\n",
      " [   1    2    0    0    3    2  942    0    7    1]\n",
      " [   1    3    9    2    1    0    0 1007    0    5]\n",
      " [   2    1    1    2    1    5    0    1  961    0]\n",
      " [   2    2    0    3    6    3    0    6    5  982]]\n"
     ]
    }
   ],
   "source": [
    "# LSTM feature extractor + XGBoost classifier on MNIST.\n",
    "# Each 28x28 image is fed to an LSTM as 28 time steps of 28 features;\n",
    "# the 50-d final hidden state is then used as input features for XGBoost.\n",
    "import numpy as np\n",
    "np.random.seed(1337)  # for reproducibility\n",
    "from keras.datasets import mnist\n",
    "from keras.utils import np_utils\n",
    "from keras.models import Model\n",
    "from keras.layers import Dense, Input, LSTM\n",
    "from keras.optimizers import Adam\n",
    "import xgboost\n",
    "import pandas as pd\n",
    "from sklearn import metrics\n",
    "# sklearn.grid_search was deprecated in 0.18 and removed in 0.20;\n",
    "# the replacement lives in sklearn.model_selection\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "\n",
    "TIME_STEPS = 28     # same as the height of the image\n",
    "INPUT_SIZE = 28     # same as the width of the image\n",
    "OUTPUT_SIZE = 10\n",
    "CELL_SIZE = 50\n",
    "LR = 0.001\n",
    "\n",
    "# download the mnist to the path '~/.keras/datasets/' if it is the first time to be called\n",
    "# X shape (60,000 28x28), y shape (10,000, )\n",
    "(X_train, y_train), (X_test, y_test) = mnist.load_data('mnist.npz')\n",
    "\n",
    "# data pre-processing\n",
    "X_train = X_train.reshape(-1, TIME_STEPS, INPUT_SIZE) / 255.      # normalize\n",
    "X_test = X_test.reshape(-1, TIME_STEPS, INPUT_SIZE) / 255.        # normalize\n",
    "Y_train = np_utils.to_categorical(y_train, num_classes=10)\n",
    "Y_test = np_utils.to_categorical(y_test, num_classes=10)\n",
    "\n",
    "inputs = Input(shape=(TIME_STEPS, INPUT_SIZE))\n",
    "# Keras 2 API: the input shape is already fixed by the Input tensor, so the\n",
    "# deprecated input_dim/input_length arguments are redundant and dropped\n",
    "srnn = LSTM(units=CELL_SIZE)(inputs)\n",
    "dense = Dense(OUTPUT_SIZE, activation='softmax')(srnn)\n",
    "\n",
    "# Two views of the same graph: the full classifier, and the 50-d features\n",
    "model_dense2_output = Model(inputs=inputs, outputs=srnn)\n",
    "model = Model(inputs=inputs, outputs=dense)\n",
    "\n",
    "# optimizer\n",
    "adam = Adam(LR)\n",
    "model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\n",
    "print(model.summary())\n",
    "\n",
    "print('Training ------------')\n",
    "model.fit(X_train, Y_train, epochs=5, batch_size=16)\n",
    "\n",
    "print('\\nTesting ------------')\n",
    "# Evaluate the model with the metrics we defined earlier\n",
    "result = model.predict(X_test)\n",
    "loss, accuracy = model.evaluate(X_test, Y_test)\n",
    "\n",
    "print('\\ntest loss: ', loss)\n",
    "print('\\ntest accuracy: ', accuracy)\n",
    "\n",
    "a = np.argmax(Y_test, axis=1)\n",
    "b = np.argmax(result, axis=1)\n",
    "print(metrics.classification_report(a, b))\n",
    "print(metrics.confusion_matrix(a, b))\n",
    "\n",
    "# --- XGBoost on the LSTM features ---\n",
    "X_train_xg = model_dense2_output.predict(X_train)\n",
    "X_test_xg = model_dense2_output.predict(X_test)\n",
    "\n",
    "# Persist features + label through CSV. to_csv writes the integer column\n",
    "# names as strings, so after read_csv the label column is named '50'.\n",
    "z = np.concatenate([np.array(X_train_xg).reshape(60000, 50), np.array(y_train).reshape(60000, 1)], axis=1)\n",
    "z = pd.DataFrame(z)\n",
    "z.to_csv('train_xg_rnn.csv', index=False)\n",
    "\n",
    "z = np.concatenate([np.array(X_test_xg).reshape(10000, 50), np.array(y_test).reshape(10000, 1)], axis=1)\n",
    "z = pd.DataFrame(z)\n",
    "z.to_csv('test_xg_rnn.csv', index=False)\n",
    "\n",
    "train = pd.read_csv('train_xg_rnn.csv')\n",
    "train_y = train['50'].astype('int')\n",
    "train_x = train.drop(['50'], axis=1)\n",
    "dataset = xgboost.DMatrix(train_x, label=train_y)\n",
    "watchlist = [(dataset, 'train')]\n",
    "params = {'max_depth': 7, 'eta': 0.1, 'silent': 1, 'num_class': 10, 'objective': 'multi:softmax'}\n",
    "model_xg = xgboost.train(params, dataset, num_boost_round=100, evals=watchlist)\n",
    "\n",
    "test = pd.read_csv('test_xg_rnn.csv')\n",
    "test_y = test['50'].astype('int')\n",
    "test_x = test.drop(['50'], axis=1)\n",
    "\n",
    "test_x = xgboost.DMatrix(test_x)\n",
    "# multi:softmax returns the predicted class index directly\n",
    "result = model_xg.predict(test_x)\n",
    "\n",
    "print(metrics.classification_report(test_y, result))\n",
    "print(metrics.confusion_matrix(test_y, result))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0]\ttrain-merror:0.103083\n",
      "[1]\ttrain-merror:0.08205\n",
      "[2]\ttrain-merror:0.071533\n",
      "[3]\ttrain-merror:0.064783\n",
      "[4]\ttrain-merror:0.060833\n",
      "[5]\ttrain-merror:0.0577\n",
      "[6]\ttrain-merror:0.054833\n",
      "[7]\ttrain-merror:0.0521\n",
      "[8]\ttrain-merror:0.049417\n",
      "[9]\ttrain-merror:0.047383\n",
      "[10]\ttrain-merror:0.0452\n",
      "[11]\ttrain-merror:0.042817\n",
      "[12]\ttrain-merror:0.04085\n",
      "[13]\ttrain-merror:0.039467\n",
      "[14]\ttrain-merror:0.038\n",
      "[15]\ttrain-merror:0.03615\n",
      "[16]\ttrain-merror:0.034917\n",
      "[17]\ttrain-merror:0.03355\n",
      "[18]\ttrain-merror:0.0324\n",
      "[19]\ttrain-merror:0.031233\n",
      "[20]\ttrain-merror:0.029817\n",
      "[21]\ttrain-merror:0.028733\n",
      "[22]\ttrain-merror:0.027383\n",
      "[23]\ttrain-merror:0.026283\n",
      "[24]\ttrain-merror:0.025183\n",
      "[25]\ttrain-merror:0.024417\n",
      "[26]\ttrain-merror:0.0235\n",
      "[27]\ttrain-merror:0.022617\n",
      "[28]\ttrain-merror:0.021817\n",
      "[29]\ttrain-merror:0.020817\n",
      "[30]\ttrain-merror:0.019783\n",
      "[31]\ttrain-merror:0.019067\n",
      "[32]\ttrain-merror:0.018283\n",
      "[33]\ttrain-merror:0.0176\n",
      "[34]\ttrain-merror:0.016983\n",
      "[35]\ttrain-merror:0.016333\n",
      "[36]\ttrain-merror:0.015617\n",
      "[37]\ttrain-merror:0.015217\n",
      "[38]\ttrain-merror:0.0146\n",
      "[39]\ttrain-merror:0.013933\n",
      "[40]\ttrain-merror:0.013533\n",
      "[41]\ttrain-merror:0.012933\n",
      "[42]\ttrain-merror:0.01245\n",
      "[43]\ttrain-merror:0.012017\n",
      "[44]\ttrain-merror:0.011633\n",
      "[45]\ttrain-merror:0.01085\n",
      "[46]\ttrain-merror:0.0104\n",
      "[47]\ttrain-merror:0.00985\n",
      "[48]\ttrain-merror:0.0094\n",
      "[49]\ttrain-merror:0.009017\n",
      "[50]\ttrain-merror:0.008717\n",
      "[51]\ttrain-merror:0.008367\n",
      "[52]\ttrain-merror:0.00785\n",
      "[53]\ttrain-merror:0.007533\n",
      "[54]\ttrain-merror:0.007267\n",
      "[55]\ttrain-merror:0.006967\n",
      "[56]\ttrain-merror:0.006617\n",
      "[57]\ttrain-merror:0.006483\n",
      "[58]\ttrain-merror:0.006283\n",
      "[59]\ttrain-merror:0.005883\n",
      "[60]\ttrain-merror:0.005667\n",
      "[61]\ttrain-merror:0.005467\n",
      "[62]\ttrain-merror:0.005217\n",
      "[63]\ttrain-merror:0.005\n",
      "[64]\ttrain-merror:0.004783\n",
      "[65]\ttrain-merror:0.004517\n",
      "[66]\ttrain-merror:0.004417\n",
      "[67]\ttrain-merror:0.004283\n",
      "[68]\ttrain-merror:0.00415\n",
      "[69]\ttrain-merror:0.003933\n",
      "[70]\ttrain-merror:0.00365\n",
      "[71]\ttrain-merror:0.003533\n",
      "[72]\ttrain-merror:0.0033\n",
      "[73]\ttrain-merror:0.003033\n",
      "[74]\ttrain-merror:0.0029\n",
      "[75]\ttrain-merror:0.002783\n",
      "[76]\ttrain-merror:0.002667\n",
      "[77]\ttrain-merror:0.002517\n",
      "[78]\ttrain-merror:0.002367\n",
      "[79]\ttrain-merror:0.002367\n",
      "[80]\ttrain-merror:0.002233\n",
      "[81]\ttrain-merror:0.002183\n",
      "[82]\ttrain-merror:0.00205\n",
      "[83]\ttrain-merror:0.001967\n",
      "[84]\ttrain-merror:0.001767\n",
      "[85]\ttrain-merror:0.001633\n",
      "[86]\ttrain-merror:0.0016\n",
      "[87]\ttrain-merror:0.00155\n",
      "[88]\ttrain-merror:0.0015\n",
      "[89]\ttrain-merror:0.001483\n",
      "[90]\ttrain-merror:0.001417\n",
      "[91]\ttrain-merror:0.001367\n",
      "[92]\ttrain-merror:0.001283\n",
      "[93]\ttrain-merror:0.001167\n",
      "[94]\ttrain-merror:0.001117\n",
      "[95]\ttrain-merror:0.001067\n",
      "[96]\ttrain-merror:0.001\n",
      "[97]\ttrain-merror:0.000967\n",
      "[98]\ttrain-merror:0.000883\n",
      "[99]\ttrain-merror:0.00085\n",
      "             precision    recall  f1-score   support\n",
      "\n",
      "          0       0.97      0.99      0.98       980\n",
      "          1       0.99      0.99      0.99      1135\n",
      "          2       0.96      0.97      0.97      1032\n",
      "          3       0.98      0.97      0.98      1010\n",
      "          4       0.98      0.97      0.98       982\n",
      "          5       0.98      0.97      0.97       892\n",
      "          6       0.98      0.97      0.98       958\n",
      "          7       0.97      0.96      0.97      1028\n",
      "          8       0.96      0.97      0.96       974\n",
      "          9       0.96      0.96      0.96      1009\n",
      "\n",
      "avg / total       0.97      0.97      0.97     10000\n",
      "\n",
      "[[ 968    1    0    0    0    2    3    1    4    1]\n",
      " [   1 1122    5    1    0    1    3    1    1    0]\n",
      " [   6    0 1000    7    3    0    0    8    8    0]\n",
      " [   2    0    4  984    0    4    0    7    4    5]\n",
      " [   0    0    3    0  955    0    5    1    3   15]\n",
      " [   3    1    1    7    0  862    5    4    5    4]\n",
      " [   6    3    0    0    4    5  934    0    6    0]\n",
      " [   2    4   18    3    1    0    0  990    3    7]\n",
      " [   4    0    3    1    2    5    3    2  943   11]\n",
      " [   6    6    3    3    8    2    0    5    5  971]]\n"
     ]
    }
   ],
   "source": [
    "#only xgboost\n",
    "import numpy as np\n",
    "np.random.seed(1337)  # for reproducibility\n",
    "from keras.datasets import mnist\n",
    "from keras.utils import np_utils\n",
    "import xgboost\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn import metrics\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.svm import SVC,LinearSVC\n",
    "from sklearn.grid_search import GridSearchCV\n",
    "\n",
    "# download the mnist to the path '~/.keras/datasets/' if it is the first time to be called\n",
    "# X shape (60,000 28x28), y shape (10,000, )\n",
    "(X_train, y_train), (X_test, y_test) = mnist.load_data('mnist.npz')\n",
    "\n",
    "# data pre-processing\n",
    "X_train = X_train.reshape(-1, 28*28) / 255.      # normalize\n",
    "X_test = X_test.reshape(-1, 28*28) / 255.        # normalize\n",
    "X_train.shape\n",
    "\n",
    "z = np.concatenate([np.array(X_train).reshape(60000,784),np.array(y_train).reshape(60000,1)],axis=1)\n",
    "z = pd.DataFrame(z)\n",
    "z.to_csv('train_xg_only.csv',index=False)\n",
    "\n",
    "z = np.concatenate([np.array(X_test).reshape(10000,784),np.array(y_test).reshape(10000,1)],axis=1)\n",
    "z = pd.DataFrame(z)\n",
    "z.to_csv('test_xg_only.csv',index=False)\n",
    "\n",
    "train = pd.read_csv('train_xg_only.csv')\n",
    "train_y = train['784'].astype('int')\n",
    "train_x = train.drop(['784'],axis=1)\n",
    "dataset = xgboost.DMatrix(train_x, label=train_y)\n",
    "watchlist = [(dataset, 'train')]\n",
    "params = {'max_depth':7, 'eta':0.1, 'silent':1, 'num_class':10,'objective':'multi:softmax' } \n",
    "model_xg = xgboost.train(params, dataset, num_boost_round=100, evals=watchlist)\n",
    "\n",
    "test = pd.read_csv('test_xg_only.csv')\n",
    "test_y = test['784'].astype('int')\n",
    "test_x = test.drop(['784'],axis=1)\n",
    "\n",
    "test_x = xgboost.DMatrix(test_x)\n",
    "result = model_xg.predict(test_x)\n",
    "#print(t)\n",
    "#m = LogisticRegression()\n",
    "#m = SVC(kernel='rbf',decision_function_shape='ovr')\n",
    "#m.fit(X,y)\n",
    "#print(m)\n",
    "\n",
    "print(metrics.classification_report(test_y, result))\n",
    "print(metrics.confusion_matrix(test_y, result))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(60000, 784)\n",
      "(10000, 784)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python2.7/dist-packages/ipykernel_launcher.py:41: UserWarning: Update your `Model` call to the Keras 2 API: `Model(outputs=Tensor(\"de..., inputs=Tensor(\"in...)`\n",
      "/usr/local/lib/python2.7/dist-packages/ipykernel_launcher.py:43: UserWarning: Update your `Model` call to the Keras 2 API: `Model(outputs=Tensor(\"de..., inputs=Tensor(\"in...)`\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5\n",
      "60000/60000 [==============================] - 36s 594us/step - loss: 0.0287\n",
      "Epoch 2/5\n",
      "60000/60000 [==============================] - 35s 585us/step - loss: 0.0215\n",
      "Epoch 3/5\n",
      "60000/60000 [==============================] - 35s 586us/step - loss: 0.0198\n",
      "Epoch 4/5\n",
      "60000/60000 [==============================] - 35s 590us/step - loss: 0.0188\n",
      "Epoch 5/5\n",
      "60000/60000 [==============================] - 35s 592us/step - loss: 0.0182\n",
      "[0]\ttrain-merror:0.118033\n",
      "[1]\ttrain-merror:0.101167\n",
      "[2]\ttrain-merror:0.093783\n",
      "[3]\ttrain-merror:0.088967\n",
      "[4]\ttrain-merror:0.084983\n",
      "[5]\ttrain-merror:0.081833\n",
      "[6]\ttrain-merror:0.079867\n",
      "[7]\ttrain-merror:0.077567\n",
      "[8]\ttrain-merror:0.074867\n",
      "[9]\ttrain-merror:0.07305\n",
      "[10]\ttrain-merror:0.071417\n",
      "[11]\ttrain-merror:0.06945\n",
      "[12]\ttrain-merror:0.067667\n",
      "[13]\ttrain-merror:0.066017\n",
      "[14]\ttrain-merror:0.0647\n",
      "[15]\ttrain-merror:0.062833\n",
      "[16]\ttrain-merror:0.061567\n",
      "[17]\ttrain-merror:0.06025\n",
      "[18]\ttrain-merror:0.058817\n",
      "[19]\ttrain-merror:0.057567\n",
      "[20]\ttrain-merror:0.056467\n",
      "[21]\ttrain-merror:0.055417\n",
      "[22]\ttrain-merror:0.054117\n",
      "[23]\ttrain-merror:0.052833\n",
      "[24]\ttrain-merror:0.051367\n",
      "[25]\ttrain-merror:0.050567\n",
      "[26]\ttrain-merror:0.049183\n",
      "[27]\ttrain-merror:0.048133\n",
      "[28]\ttrain-merror:0.047217\n",
      "[29]\ttrain-merror:0.046083\n",
      "[30]\ttrain-merror:0.0447\n",
      "[31]\ttrain-merror:0.0436\n",
      "[32]\ttrain-merror:0.042667\n",
      "[33]\ttrain-merror:0.042033\n",
      "[34]\ttrain-merror:0.04095\n",
      "[35]\ttrain-merror:0.040083\n",
      "[36]\ttrain-merror:0.038833\n",
      "[37]\ttrain-merror:0.038033\n",
      "[38]\ttrain-merror:0.0373\n",
      "[39]\ttrain-merror:0.036467\n",
      "[40]\ttrain-merror:0.035717\n",
      "[41]\ttrain-merror:0.035\n",
      "[42]\ttrain-merror:0.033917\n",
      "[43]\ttrain-merror:0.033467\n",
      "[44]\ttrain-merror:0.032567\n",
      "[45]\ttrain-merror:0.031883\n",
      "[46]\ttrain-merror:0.03125\n",
      "[47]\ttrain-merror:0.030333\n",
      "[48]\ttrain-merror:0.029867\n",
      "[49]\ttrain-merror:0.029283\n",
      "[50]\ttrain-merror:0.028617\n",
      "[51]\ttrain-merror:0.0281\n",
      "[52]\ttrain-merror:0.027217\n",
      "[53]\ttrain-merror:0.026667\n",
      "[54]\ttrain-merror:0.026267\n",
      "[55]\ttrain-merror:0.02565\n",
      "[56]\ttrain-merror:0.02505\n",
      "[57]\ttrain-merror:0.024683\n",
      "[58]\ttrain-merror:0.024317\n",
      "[59]\ttrain-merror:0.023883\n",
      "[60]\ttrain-merror:0.023317\n",
      "[61]\ttrain-merror:0.023083\n",
      "[62]\ttrain-merror:0.02265\n",
      "[63]\ttrain-merror:0.022333\n",
      "[64]\ttrain-merror:0.021933\n",
      "[65]\ttrain-merror:0.021417\n",
      "[66]\ttrain-merror:0.021\n",
      "[67]\ttrain-merror:0.0205\n",
      "[68]\ttrain-merror:0.020283\n",
      "[69]\ttrain-merror:0.0199\n",
      "[70]\ttrain-merror:0.019583\n",
      "[71]\ttrain-merror:0.019317\n",
      "[72]\ttrain-merror:0.018917\n",
      "[73]\ttrain-merror:0.018617\n",
      "[74]\ttrain-merror:0.018333\n",
      "[75]\ttrain-merror:0.017867\n",
      "[76]\ttrain-merror:0.017617\n",
      "[77]\ttrain-merror:0.017367\n",
      "[78]\ttrain-merror:0.017217\n",
      "[79]\ttrain-merror:0.016917\n",
      "[80]\ttrain-merror:0.0166\n",
      "[81]\ttrain-merror:0.016317\n",
      "[82]\ttrain-merror:0.0161\n",
      "[83]\ttrain-merror:0.015767\n",
      "[84]\ttrain-merror:0.015533\n",
      "[85]\ttrain-merror:0.0153\n",
      "[86]\ttrain-merror:0.01505\n",
      "[87]\ttrain-merror:0.014733\n",
      "[88]\ttrain-merror:0.014417\n",
      "[89]\ttrain-merror:0.014117\n",
      "[90]\ttrain-merror:0.013917\n",
      "[91]\ttrain-merror:0.0137\n",
      "[92]\ttrain-merror:0.013433\n",
      "[93]\ttrain-merror:0.013033\n",
      "[94]\ttrain-merror:0.01285\n",
      "[95]\ttrain-merror:0.012417\n",
      "[96]\ttrain-merror:0.012317\n",
      "[97]\ttrain-merror:0.012267\n",
      "[98]\ttrain-merror:0.01215\n",
      "[99]\ttrain-merror:0.011733\n",
      "             precision    recall  f1-score   support\n",
      "\n",
      "          0       0.97      0.98      0.97       980\n",
      "          1       0.99      0.99      0.99      1135\n",
      "          2       0.94      0.94      0.94      1032\n",
      "          3       0.92      0.93      0.92      1010\n",
      "          4       0.96      0.94      0.95       982\n",
      "          5       0.94      0.95      0.94       892\n",
      "          6       0.96      0.97      0.97       958\n",
      "          7       0.96      0.93      0.95      1028\n",
      "          8       0.91      0.92      0.92       974\n",
      "          9       0.91      0.93      0.92      1009\n",
      "\n",
      "avg / total       0.95      0.95      0.95     10000\n",
      "\n",
      "[[ 959    0    4    2    1    4    6    1    2    1]\n",
      " [   0 1118    3    2    1    0    4    0    4    3]\n",
      " [   8    1  972   19    0    2    4    9   14    3]\n",
      " [   3    1   10  935    1   22    0    5   28    5]\n",
      " [   0    0    2    0  923    0   11    3    3   40]\n",
      " [   8    0    1   15    2  849    5    1    9    2]\n",
      " [   7    4    4    1    6    5  926    0    5    0]\n",
      " [   1    4   22    2    5    2    0  959    2   31]\n",
      " [   2    0    8   30    1   23    3    2  894   11]\n",
      " [   4    6    3    6   17    1    2   14   17  939]]\n"
     ]
    }
   ],
   "source": [
    "#aotoencoder with xgboost\n",
    "\n",
    "import numpy as np\n",
    "np.random.seed(1337)  # for reproducibility\n",
    "\n",
    "from keras.datasets import mnist\n",
    "from keras.models import Model\n",
    "from keras.layers import Dense, Input\n",
    "\n",
    "# download the mnist to the path '~/.keras/datasets/' if it is the first time to be called\n",
    "# X shape (60,000 28x28), y shape (10,000, )\n",
    "(x_train, y_train), (x_test, y_test) = mnist.load_data('mnist.npz')\n",
    "\n",
    "# data pre-processing\n",
    "x_train = x_train.astype('float32') / 255. - 0.5       # minmax_normalized\n",
    "x_test = x_test.astype('float32') / 255. - 0.5         # minmax_normalized\n",
    "\n",
    "x_train = x_train.reshape((x_train.shape[0], -1))\n",
    "x_test = x_test.reshape((x_test.shape[0], -1))\n",
    "\n",
    "print(x_train.shape)\n",
    "print(x_test.shape)\n",
    "\n",
    "# in order to plot in a 2D figure\n",
    "encoding_dim = 10\n",
    "# this is our input placeholder\n",
    "input_img = Input(shape=(784,))\n",
    "# encoder layers\n",
    "encoded = Dense(256, activation='relu')(input_img)\n",
    "encoded = Dense(128, activation='relu')(encoded)\n",
    "encoded = Dense(64, activation='relu')(encoded)\n",
    "encoder_output = Dense(encoding_dim)(encoded)\n",
    "\n",
    "# decoder layers\n",
    "decoded = Dense(64, activation='relu')(encoder_output)\n",
    "decoded = Dense(128, activation='relu')(decoded)\n",
    "decoded = Dense(256, activation='relu')(decoded)\n",
    "decoded = Dense(784, activation='tanh')(decoded)\n",
    "\n",
    "# construct the autoencoder model\n",
    "autoencoder = Model(input=input_img, output=decoded)\n",
    "# construct the encoder model for plotting\n",
    "encoder = Model(input=input_img, output=encoder_output)\n",
    "# compile autoencoder\n",
    "autoencoder.compile(optimizer='adam', loss='mse')\n",
    "# training\n",
    "autoencoder.fit(x_train, x_train,epochs=5, batch_size=16,shuffle=True)\n",
    "\n",
    "import xgboost\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn import metrics\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.svm import SVC,LinearSVC\n",
    "from sklearn.grid_search import GridSearchCV\n",
    "\n",
    "X_train_xg = encoder.predict(x_train)\n",
    "X_test_xg = encoder.predict(x_test)\n",
    "\n",
    "z = np.concatenate([np.array(X_train_xg).reshape(60000,10),np.array(y_train).reshape(60000,1)],axis=1)\n",
    "z = pd.DataFrame(z)\n",
    "z.to_csv('train_xg_ae.csv',index=False)\n",
    "\n",
    "z = np.concatenate([np.array(X_test_xg).reshape(10000,10),np.array(y_test).reshape(10000,1)],axis=1)\n",
    "z = pd.DataFrame(z)\n",
    "z.to_csv('test_xg_ae.csv',index=False)\n",
    "\n",
    "train = pd.read_csv('train_xg_ae.csv')\n",
    "train_y = train['10'].astype('int')\n",
    "train_x = train.drop(['10'],axis=1)\n",
    "dataset = xgboost.DMatrix(train_x, label=train_y)\n",
    "watchlist = [(dataset, 'train')]\n",
    "params = {'max_depth':7, 'eta':0.1, 'silent':1, 'num_class':10,'objective':'multi:softmax' } \n",
    "model_xg = xgboost.train(params, dataset, num_boost_round=100, evals=watchlist)\n",
    "\n",
    "test = pd.read_csv('test_xg_ae.csv')\n",
    "test_y = test['10'].astype('int')\n",
    "test_x = test.drop(['10'],axis=1)\n",
    "\n",
    "test_x = xgboost.DMatrix(test_x)\n",
    "result = model_xg.predict(test_x)\n",
    "#print(t)\n",
    "#m = LogisticRegression()\n",
    "#m = SVC(kernel='rbf',decision_function_shape='ovr')\n",
    "#m.fit(X,y)\n",
    "#print(m)\n",
    "\n",
    "print(metrics.classification_report(test_y, result))\n",
    "print(metrics.confusion_matrix(test_y, result))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
