{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "dataset shape is:  (1797, 8, 8)\n",
      "the number of train:  1137\n",
      "the number of test:  360\n",
      "the number of validation:  300\n",
      "training data shape:  (1137, 64)\n",
      "validation data shape:  (300, 64)\n",
      "test data shape:  (360, 64)\n"
     ]
    }
   ],
   "source": [
    "from sklearn import datasets\n",
    "from sklearn.model_selection import train_test_split\n",
    "# 加载sklearn自带的mnist数据\n",
    "digits = datasets.load_digits()\n",
    "# 数据集包含1797个手写体数字的图片，图片大小为8*8\n",
    "# 数字大小0～10，也就是说有这是个10分类问题\n",
    "images = digits.images\n",
    "targets = digits.target\n",
    "print((\"dataset shape is: \"), images.shape)\n",
    "# 将数据分为训练数据和测试数据（20%）\n",
    "X_train,X_test,y_train,y_test = train_test_split(images , targets , test_size=0.2 , random_state=0)\n",
    "num_training = 1137\n",
    "num_validation = 300\n",
    "num_test = y_test.shape[0]\n",
    "# 将训练集再分为训练集和验证集\n",
    "mask = list(range(num_training, num_training + num_validation))\n",
    "X_val = X_train[mask]\n",
    "y_val = y_train[mask]\n",
    "mask = list(range(num_training))\n",
    "X_train = X_train[mask]\n",
    "y_train = y_train[mask]\n",
    "mask = list(range(num_test))\n",
    "X_test = X_test[mask]\n",
    "y_test = y_test[mask]\n",
    "print(\"the number of train: \", num_training)\n",
    "print(\"the number of test: \", num_test)\n",
    "print(\"the number of validation: \", num_validation)\n",
    "# 将每个数字8*8的像素矩阵转化为64*1的向量\n",
    "X_train = X_train.reshape(num_training, -1)\n",
    "X_val = X_val.reshape(num_validation, -1)\n",
    "X_test = X_test.reshape(num_test, -1)\n",
    "print(\"training data shape: \", X_train.shape)\n",
    "print(\"validation data shape: \", X_val.shape)\n",
    "print(\"test data shape: \", X_test.shape)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义神经网络的参数\n",
    "# 定义超参\n",
    "input_size = 64\n",
    "hidden_size = 30\n",
    "num_classes = 10\n",
    "# 为了之后使用的方便，我将参数初始化，计算loss，训练，预测的过程都定义在一个名为network的类中\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "class network(object):\n",
    "    # 初始化参数,将W,b保存在名为params的字典中\n",
    "    # W随机初始化，b初始化为零\n",
    "    def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n",
    "        self.params = {}\n",
    "        self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n",
    "        self.params['b1'] = np.zeros(hidden_size)\n",
    "        self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n",
    "        self.params['b2'] = np.zeros(output_size)\n",
    "    # 定义损失函数，里面包含了前向传播的实现过程\n",
    "    def loss(self, X, y=None, reg=0.0):\n",
    "        # 先讲各个参数都提取出来\n",
    "        W1 = self.params['W1']\n",
    "        b1 = self.params['b1']\n",
    "        W2 = self.params['W2']\n",
    "        b2 = self.params['b2']\n",
    "        N, D = X.shape\n",
    "        # 前向传播\n",
    "        # hidden的实现\n",
    "        hidden = np.dot(X, W1) + b1\n",
    "        # relu:max(0, x)\n",
    "        hidden = np.maximum(0,hidden)\n",
    "        # 算输出y\n",
    "        y2 = np.dot(hidden, W2) + b2\n",
    "        #if y == None:\n",
    "          #  return y2\n",
    "        # loss 计算\n",
    "        loss = None\n",
    "        loss = -y2[range(N), y].sum() + np.log(np.exp(y2).sum(axis = 1)).sum()\n",
    "        loss = loss / N + 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n",
    "        # 反向传播\n",
    "        # 首先定义一个grads的字典，存放各个可训练参数的梯度\n",
    "        grads = {}\n",
    "        # 按照计算图，先计算dscore\n",
    "        # 先对y2取对数\n",
    "        exp = np.exp(y2)\n",
    "        # 求每行个元素的和，之后用每行各个元素除上该行的和\n",
    "        dscore = exp / exp.sum(axis = 1, keepdims = True)\n",
    "        # 对label（即y）对应的元素减1\n",
    "        dscore[range(N),y] -= 1\n",
    "        # 别忘了还要除输入样本的个数\n",
    "        dscore = dscore/N \n",
    "        grads['b2'] = np.sum(dscore, axis=0)\n",
    "        grads['W2'] = np.dot(hidden.T, dscore)+ reg * W2\n",
    "        # dhidden\n",
    "        dhidden = np.dot(dscore, W2.T)\n",
    "        # 因为加了relu激活函数，随意要讲XW1 + b1 <0对应的dihidden元素归0\n",
    "        dhidden[(np.dot(X, W1)+b1)<0] = 0\n",
    "        grads['b1'] = np.sum(dhidden, axis=0)\n",
    "        grads['W1'] = np.dot(X.T, dhidden) + reg * W1\n",
    "        return loss, grads\n",
    "    # 训练神经网络，使用了随机梯度下降，和学习率衰减的技巧\n",
    "    def train(self, X, y, X_val, y_val,\n",
    "            learning_rate=1e-3, learning_rate_decay=0.95,\n",
    "            reg=5e-6, num_iters=100,\n",
    "            batch_size=200, verbose=False):\n",
    "        # 查看有有多少个训练样本，并检查按照设定的batch大小每个epoch需要迭代多少次\n",
    "        num_train = X.shape[0]\n",
    "        iterations_per_epoch = max(num_train / batch_size, 1)\n",
    "\n",
    "        # 使用随机梯度下降优化可训练参数\n",
    "        # 把训练过程中得到的loss和准确率信息存起来方便查看并解决问题\n",
    "        loss_history = []\n",
    "        train_acc_history = []\n",
    "        val_acc_history = []\n",
    "        # 迭代numz_iters次，每次只随机选择一个batch来训练样本\n",
    "        for it in range(num_iters):\n",
    "            X_batch = None\n",
    "            y_batch = None\n",
    "            indices = np.random.choice(num_train, batch_size, replace=True)\n",
    "            X_batch = X[indices]\n",
    "            y_batch = y[indices]\n",
    "            # 用当前的batch训练数据来得到loss 和grad\n",
    "            loss, grads = self.loss(X_batch, y=y_batch, reg=reg)\n",
    "            # 记录这次迭代的损失大小\n",
    "            loss_history.append(loss)\n",
    "            self.params['W1'] -=  learning_rate * grads['W1']\n",
    "            self.params['b1'] -=  learning_rate * grads['b1']\n",
    "            self.params['W2'] -=  learning_rate * grads['W2']\n",
    "            self.params['b2'] -=  learning_rate * grads['b2']\n",
    "            # 如果你选择了可视化训练过程，那么会显示每次迭代产生的loss\n",
    "            if verbose and it % 100 == 0:\n",
    "                print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n",
    "\n",
    "           # Every epoch, check train and val accuracy and decay learning rate.\n",
    "            if it % iterations_per_epoch == 0:\n",
    "                # Check accuracy\n",
    "                train_acc = (self.predict(X_batch) == y_batch).mean()\n",
    "                val_acc = (self.predict(X_val) == y_val).mean()\n",
    "                train_acc_history.append(train_acc)\n",
    "                val_acc_history.append(val_acc)\n",
    "\n",
    "                # 每个epoch结束，衰减一下学习率\n",
    "                learning_rate *= learning_rate_decay\n",
    "\n",
    "        return {\n",
    "            'loss_history': loss_history,\n",
    "            'train_acc_history': train_acc_history,\n",
    "            'val_acc_history': val_acc_history,\n",
    "        }\n",
    "\n",
    "    def predict(self, X):\n",
    "    \n",
    "        y_pred = None\n",
    "        W1, b1 = self.params['W1'], self.params['b1']\n",
    "        W2, b2 = self.params['W2'], self.params['b2']\n",
    "\n",
    "        hidden_layer = np.maximum(0, np.dot(X, W1) + b1)\n",
    "        scores = np.dot(hidden_layer, W2) + b2\n",
    "        y_pred = np.argmax(scores, axis = 1)\n",
    "        return y_pred\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "        \n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "iteration 0 / 5000: loss 2.302588\n",
      "iteration 100 / 5000: loss 2.302319\n",
      "iteration 200 / 5000: loss 2.189721\n",
      "iteration 300 / 5000: loss 1.363581\n",
      "iteration 400 / 5000: loss 0.985627\n",
      "iteration 500 / 5000: loss 0.880485\n",
      "iteration 600 / 5000: loss 0.856803\n",
      "iteration 700 / 5000: loss 0.808877\n",
      "iteration 800 / 5000: loss 0.758501\n",
      "iteration 900 / 5000: loss 0.771681\n",
      "iteration 1000 / 5000: loss 0.786597\n",
      "iteration 1100 / 5000: loss 0.791402\n",
      "iteration 1200 / 5000: loss 0.737580\n",
      "iteration 1300 / 5000: loss 0.770927\n",
      "iteration 1400 / 5000: loss 0.778614\n",
      "iteration 1500 / 5000: loss 0.742769\n",
      "iteration 1600 / 5000: loss 0.752742\n",
      "iteration 1700 / 5000: loss 0.727401\n",
      "iteration 1800 / 5000: loss 0.721499\n",
      "iteration 1900 / 5000: loss 0.718781\n",
      "iteration 2000 / 5000: loss 0.740146\n",
      "iteration 2100 / 5000: loss 0.767352\n",
      "iteration 2200 / 5000: loss 0.738089\n",
      "iteration 2300 / 5000: loss 0.769300\n",
      "iteration 2400 / 5000: loss 0.699871\n",
      "iteration 2500 / 5000: loss 0.763431\n",
      "iteration 2600 / 5000: loss 0.704076\n",
      "iteration 2700 / 5000: loss 0.727613\n",
      "iteration 2800 / 5000: loss 0.747601\n",
      "iteration 2900 / 5000: loss 0.741193\n",
      "iteration 3000 / 5000: loss 0.744105\n",
      "iteration 3100 / 5000: loss 0.737767\n",
      "iteration 3200 / 5000: loss 0.759800\n",
      "iteration 3300 / 5000: loss 0.775625\n",
      "iteration 3400 / 5000: loss 0.745424\n",
      "iteration 3500 / 5000: loss 0.709397\n",
      "iteration 3600 / 5000: loss 0.719007\n",
      "iteration 3700 / 5000: loss 0.738790\n",
      "iteration 3800 / 5000: loss 0.734190\n",
      "iteration 3900 / 5000: loss 0.735703\n",
      "iteration 4000 / 5000: loss 0.722711\n",
      "iteration 4100 / 5000: loss 0.749296\n",
      "iteration 4200 / 5000: loss 0.782329\n",
      "iteration 4300 / 5000: loss 0.745129\n",
      "iteration 4400 / 5000: loss 0.796900\n",
      "iteration 4500 / 5000: loss 0.710180\n",
      "iteration 4600 / 5000: loss 0.760838\n",
      "iteration 4700 / 5000: loss 0.761966\n",
      "iteration 4800 / 5000: loss 0.712686\n",
      "iteration 4900 / 5000: loss 0.763593\n",
      "Validation accuracy:  0.9566666666666667\n",
      "test accuracy:  0.9555555555555556\n"
     ]
    }
   ],
   "source": [
    "net = network(input_size, hidden_size, num_classes)\n",
    "stats = net.train(X_train, y_train, X_val, y_val,\n",
    "            num_iters=5000, batch_size=200,\n",
    "            learning_rate=0.01, learning_rate_decay=0.95,\n",
    "            reg=0.25, verbose=True)\n",
    "val_acc = (net.predict(X_val) == y_val).mean()\n",
    "print('Validation accuracy: ', val_acc)\n",
    "\n",
    "test_acc = (net.predict(X_test) == y_test).mean()\n",
    "print('test accuracy: ', test_acc)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
