{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0ece2dc8",
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/python\n",
    "# -*- coding: utf-8 -*-\n",
    "\n",
    "import numpy as np\n",
    "import scipy.special\n",
    "import matplotlib.pyplot as plt\n",
    "import pylab\n",
    "\n",
    "class NeuralNetwork():\n",
    "    \"\"\"A minimal 3-layer (input/hidden/output) feed-forward neural network\n",
    "    trained with vanilla back-propagation and a sigmoid activation.\"\"\"\n",
    "\n",
    "    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n",
    "        \"\"\"Store the layer sizes and learning rate, and initialise weights.\n",
    "\n",
    "        inputnodes/hiddennodes/outputnodes: node counts per layer.\n",
    "        learningrate: step size for the gradient-descent weight updates.\n",
    "        \"\"\"\n",
    "        self.inodes = inputnodes\n",
    "        self.hnodes = hiddennodes\n",
    "        self.onodes = outputnodes\n",
    "        self.lr = learningrate\n",
    "        # Initialise each weight matrix from a normal distribution\n",
    "        # N(0, n**-0.5), where n is the number of nodes the weights feed into\n",
    "        # (np.random.normal(loc, scale, size): mean, std-dev, output shape).\n",
    "        self.wih = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))\n",
    "        self.who = np.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))\n",
    "        # Activation function: the logistic sigmoid.\n",
    "        self.activation_function = lambda x: scipy.special.expit(x)\n",
    "\n",
    "    def train(self, input_list, target_list):\n",
    "        \"\"\"Run one forward pass and one back-propagation weight update.\"\"\"\n",
    "        # Convert the input/target lists into 2-D column vectors\n",
    "        # (ndmin=2 wraps them in an extra dimension, .T transposes).\n",
    "        inputs = np.array(input_list, ndmin=2).T\n",
    "        targets = np.array(target_list, ndmin=2).T\n",
    "        # Forward pass: input -> hidden -> output (np.dot is matrix multiply).\n",
    "        hidden_inputs = np.dot(self.wih, inputs)\n",
    "        hidden_outputs = self.activation_function(hidden_inputs)\n",
    "        final_inputs = np.dot(self.who, hidden_outputs)\n",
    "        final_outputs = self.activation_function(final_inputs)\n",
    "\n",
    "        # Output-layer error, and that error back-propagated to the hidden\n",
    "        # layer through the transposed hidden->output weights.\n",
    "        output_errors = targets - final_outputs\n",
    "        hidden_errors = np.dot(self.who.T, output_errors)\n",
    "\n",
    "        # Gradient-descent updates; y * (1 - y) is the sigmoid derivative.\n",
    "        self.who += self.lr * np.dot((output_errors * final_outputs * (1.0 - final_outputs)),\n",
    "                                     np.transpose(hidden_outputs))\n",
    "        self.wih += self.lr * np.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),\n",
    "                                     np.transpose(inputs))\n",
    "\n",
    "    def query(self, input_list):\n",
    "        \"\"\"Forward-propagate input_list through the network; return the output layer.\"\"\"\n",
    "        inputs = np.array(input_list, ndmin=2).T\n",
    "        hidden_inputs = np.dot(self.wih, inputs)\n",
    "        hidden_outputs = self.activation_function(hidden_inputs)\n",
    "        final_inputs = np.dot(self.who, hidden_outputs)\n",
    "        final_outputs = self.activation_function(final_inputs)\n",
    "        return final_outputs\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0c9820ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the network and train it for `epochs` full passes over the\n",
    "# training set, updating the weights after every record.\n",
    "input_nodes = 784              # one input per 28x28 pixel\n",
    "hidden_nodes = 100             # hidden-layer size\n",
    "output_nodes = 10              # one output per digit 0-9\n",
    "learning_rate = 0.3\n",
    "n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)\n",
    "\n",
    "# Read the training set; each line is \"label,pixel0,...,pixel783\".\n",
    "with open(\"source/mnist_train.csv\", 'r') as training_data_file:\n",
    "    training_data_list = training_data_file.readlines()\n",
    "\n",
    "epochs = 2\n",
    "for e in range(epochs):\n",
    "    for record in training_data_list:\n",
    "        all_values = record.split(',')\n",
    "        # Scale pixels from 0-255 into 0.01-1.00 (the +0.01 offset avoids\n",
    "        # zero inputs, which would kill the weight updates).\n",
    "        inputs = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01\n",
    "        # Target vector: 0.01 everywhere except 0.99 at the label's index.\n",
    "        targets = np.zeros(output_nodes) + 0.01\n",
    "        targets[int(all_values[0])] = 0.99\n",
    "        n.train(inputs, targets)\n",
    "print('train over')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "df1501ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate the trained network on the test set: append 1 to `score` for\n",
    "# each correct prediction and 0 otherwise, for the accuracy computed below.\n",
    "with open(\"source/mnist_test.csv\", 'r') as test_data_file:\n",
    "    test_data_list = test_data_file.readlines()\n",
    "\n",
    "score = []\n",
    "print(\"***************Test start!**********************\")\n",
    "for record in test_data_list:\n",
    "    all_values = record.split(',')\n",
    "    # The first field of each CSV line is the correct digit label.\n",
    "    correct_values = int(all_values[0])\n",
    "    print(correct_values,\"是正确的期望值\")\n",
    "    # Scale pixels into 0.01-1.00, exactly as during training.\n",
    "    inputs = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01\n",
    "    outputs = n.query(inputs)\n",
    "    # The predicted digit is the index of the largest output activation.\n",
    "    label = np.argmax(outputs)\n",
    "    print(label,\"是网络的输出值\\n\")\n",
    "    # Record a hit (1) or a miss (0) for this test record.\n",
    "    if label == correct_values:\n",
    "        score.append(1)\n",
    "    else:\n",
    "        score.append(0)\n",
    "print('test over')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fb0e963f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Overall accuracy: fraction of 1s in `score`, as a percentage.\n",
    "# np.asfarray was removed in NumPy 2.0; asarray with dtype=float is equivalent.\n",
    "score_array = np.asarray(score, dtype=float)\n",
    "print(\"正确率是：\",(score_array.sum()/score_array.size)*100,'%')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3a2410fe",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  },
  "pycharm": {
   "stem_cell": {
    "cell_type": "raw",
    "source": [],
    "metadata": {
     "collapsed": false
    }
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}