{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "======================测试initialize_parameters======================\n",
      "W1 = [[ 0.01624345 -0.00611756 -0.00528172]\n",
      " [-0.01072969  0.00865408 -0.02301539]]\n",
      "b1 = [[0.]\n",
      " [0.]]\n",
      "W2 = [[ 0.01744812 -0.00761207]]\n",
      "b2 = [[0.]]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import h5py\n",
    "import matplotlib.pyplot as plt\n",
    "import testCases\n",
    "from dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward\n",
    "import lr_utils\n",
    "\n",
    "np.random.seed(1)\n",
    "\n",
    "def initialize_parameters(n_x, n_h, n_y):\n",
    "    \"\"\"\n",
    "    此函数是为了初始化两层网络参数而使用的函数。\n",
    "    参数：\n",
    "        n_x - 输入层节点数量\n",
    "        n_h - 隐藏层节点数量\n",
    "        n_y - 输出层节点数量\n",
    "    \n",
    "    返回：\n",
    "        parameters - 包含你的参数的python字典：\n",
    "            W1 - 权重矩阵,维度为（n_h，n_x）\n",
    "            b1 - 偏向量，维度为（n_h，1）\n",
    "            W2 - 权重矩阵，维度为（n_y，n_h）\n",
    "            b2 - 偏向量，维度为（n_y，1）\n",
    "    \"\"\"\n",
    "    W1 = np.random.randn(n_h, n_x) * 0.01\n",
    "    b1 = np.zeros((n_h, 1))\n",
    "    W2 = np.random.randn(n_y, n_h) * 0.01\n",
    "    b2 = np.zeros((n_y, 1))\n",
    "    \n",
    "    assert(W1.shape == (n_h, n_x))\n",
    "    assert(b1.shape == (n_h, 1))\n",
    "    assert(W2.shape == (n_y, n_h))\n",
    "    assert(b2.shape == (n_y, 1))\n",
    "    \n",
    "    parameters = {\n",
    "        \"W1\": W1,\n",
    "        \"b1\": b1,\n",
    "        \"W2\": W2,\n",
    "        \"b2\": b2\n",
    "    }\n",
    "    return parameters\n",
    "\n",
    "#测试\n",
    "print(\"======================测试initialize_parameters======================\")\n",
    "parameters = initialize_parameters(3,2,1)\n",
    "print(\"W1 = \" + str(parameters[\"W1\"]))\n",
    "print(\"b1 = \" + str(parameters[\"b1\"]))\n",
    "print(\"W2 = \" + str(parameters[\"W2\"]))\n",
    "print(\"b2 = \" + str(parameters[\"b2\"]))"
   ]
  },
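  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick aside (our addition, not part of the original exercise): the `* 0.01` scaling is what breaks the symmetry between hidden units. If all weights started at zero, every unit in a layer would compute the same output and receive the same gradient, so the units could never specialize. A minimal sketch:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustration (ours): zero weights make both hidden units identical, while small\n",
    "# random weights give each unit its own pre-activation from the very first step.\n",
    "x_demo = np.random.randn(3, 1)\n",
    "print(np.dot(np.zeros((2, 3)), x_demo))              # both rows identical (all zeros)\n",
    "print(np.dot(np.random.randn(2, 3) * 0.01, x_demo))  # rows differ: symmetry broken"
   ]
  },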
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "======================测试initialize_parameters_deep=======================\n",
      "W1 = [[ 0.79989897  0.19521314  0.04315498 -0.83337927 -0.12405178]\n",
      " [-0.15865304 -0.03700312 -0.28040323 -0.01959608 -0.21341839]\n",
      " [-0.58757818  0.39561516  0.39413741  0.76454432  0.02237573]\n",
      " [-0.18097724 -0.24389238 -0.69160568  0.43932807 -0.49241241]]\n",
      "b1 = [[0.]\n",
      " [0.]\n",
      " [0.]\n",
      " [0.]]\n",
      "W2 = [[-0.59252326 -0.10282495  0.74307418  0.11835813]\n",
      " [-0.51189257 -0.3564966   0.31262248 -0.08025668]\n",
      " [-0.38441818 -0.11501536  0.37252813  0.98805539]]\n",
      "b2 = [[0.]\n",
      " [0.]\n",
      " [0.]]\n"
     ]
    }
   ],
   "source": [
    "def initialize_parameters_deep(layers_dims):\n",
    "    \"\"\"\n",
    "    此函数是为了初始化多层网络参数而使用的函数。\n",
    "    参数：\n",
    "        layers_dims - 包含我们网络中每个图层的节点数量的列表\n",
    "    \n",
    "    返回：\n",
    "        parameters - 包含参数“W1”，“b1”，...，“WL”，“bL”的字典：\n",
    "                     W1 - 权重矩阵，维度为（layers_dims [1]，layers_dims [1-1]）\n",
    "                     bl - 偏向量，维度为（layers_dims [1]，1）\n",
    "    \"\"\"\n",
    "    np.random.seed(3)\n",
    "    parameters = {}\n",
    "    L = len(layers_dims)\n",
    "    \n",
    "    for l in range(1,L):\n",
    "        parameters[\"W\" + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) / np.sqrt(layers_dims[l - 1])\n",
    "        parameters[\"b\" + str(l)] = np.zeros((layers_dims[l], 1))\n",
    "        \n",
    "        #确保我要的数据的格式是正确的\n",
    "        assert(parameters[\"W\" + str(l)].shape == (layers_dims[l], layers_dims[l-1]))\n",
    "        assert(parameters[\"b\" + str(l)].shape == (layers_dims[l], 1))\n",
    "        \n",
    "    return parameters\n",
    "\n",
    "#测试\n",
    "print(\"======================测试initialize_parameters_deep=======================\")\n",
    "layers_dims = [5,4,3]\n",
    "parameters = initialize_parameters_deep(layers_dims)\n",
    "print(\"W1 = \" + str(parameters[\"W1\"]))\n",
    "print(\"b1 = \" + str(parameters[\"b1\"]))\n",
    "print(\"W2 = \" + str(parameters[\"W2\"]))\n",
    "print(\"b2 = \" + str(parameters[\"b2\"]))"
   ]
  },
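  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Note that `initialize_parameters_deep` divides by `np.sqrt(layers_dims[l-1])` instead of multiplying by `0.01`. This Xavier-style scaling keeps the spread of the pre-activations roughly independent of a layer's fan-in, which helps deeper networks train. A small sketch of the effect (our addition; the layer sizes are made up):\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compare std(Z) for a layer with 500 inputs under the two schemes: with *0.01 the\n",
    "# spread depends on the fan-in, with /sqrt(n) it stays near std(A) regardless of n.\n",
    "np.random.seed(2)\n",
    "n_prev = 500\n",
    "A_demo = np.random.randn(n_prev, 1000)\n",
    "print(\"std with *0.01    :\", np.dot(np.random.randn(10, n_prev) * 0.01, A_demo).std())\n",
    "print(\"std with /sqrt(n) :\", np.dot(np.random.randn(10, n_prev) / np.sqrt(n_prev), A_demo).std())"
   ]
  },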
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==================测试linear_forward================\n",
      "Z =[[ 3.26295337 -1.23429987]]\n",
      "======================测试linear_activation_forward======================\n",
      "sigmoid, A = [[0.96890023 0.11013289]]\n",
      "ReLU, A = [[3.43896131 0.        ]]\n"
     ]
    }
   ],
   "source": [
    "def linear_forward(A,W,b):\n",
    "    \"\"\"\n",
    "    两层模型的前向传播\n",
    "     实现前向传播的线性部分。\n",
    "\n",
    "    参数：\n",
    "        A - 来自上一层（或输入数据）的激活，维度为(上一层的节点数量，示例的数量）\n",
    "        W - 权重矩阵，numpy数组，维度为（当前图层的节点数量，前一图层的节点数量）\n",
    "        b - 偏向量，numpy向量，维度为（当前图层节点数量，1）\n",
    "\n",
    "    返回：\n",
    "         Z - 激活功能的输入，也称为预激活参数\n",
    "         cache - 一个包含“A”，“W”和“b”的字典，存储这些变量以有效地计算后向传递\n",
    "    \"\"\"\n",
    "    Z = np.dot(W,A) + b\n",
    "    assert(Z.shape == (W.shape[0],A.shape[1]))\n",
    "    cache = (A,W,b)\n",
    "    \n",
    "    return Z,cache\n",
    "\n",
    "#测试\n",
    "print(\"==================测试linear_forward================\")\n",
    "A,W,b = testCases.linear_forward_test_case()\n",
    "Z,linear_cache = linear_forward(A,W,b)\n",
    "print(\"Z =\" + str(Z))\n",
    "\n",
    "def linear_activation_forward(A_prev, W, b, activation):\n",
    "    \"\"\"\n",
    "    两层模型的前向传播\n",
    "     实现LINEAR-> ACTIVATION 这一层的前向传播\n",
    "\n",
    "    参数：\n",
    "        A_prev - 来自上一层（或输入层）的激活，维度为(上一层的节点数量，示例数）\n",
    "        W - 权重矩阵，numpy数组，维度为（当前层的节点数量，前一层的大小）\n",
    "        b - 偏向量，numpy阵列，维度为（当前层的节点数量，1）\n",
    "        activation - 选择在此层中使用的激活函数名，字符串类型，【\"sigmoid\" | \"relu\"】\n",
    "\n",
    "    返回：\n",
    "        A - 激活函数的输出，也称为激活后的值\n",
    "        cache - 一个包含“linear_cache”和“activation_cache”的字典，\n",
    "                以有效地计算后向传递\n",
    "    \"\"\"\n",
    "    if activation == \"sigmoid\":\n",
    "        Z, linear_cache = linear_forward(A_prev,W,b)\n",
    "        A, activation_cache = sigmoid(Z)\n",
    "    elif activation == \"relu\":\n",
    "        Z, linear_cache = linear_forward(A_prev,W,b)\n",
    "        A, activation_cache = relu(Z)\n",
    "    \n",
    "    assert(A.shape == (W.shape[0],A_prev.shape[1]))\n",
    "    cache = (linear_cache,activation_cache)\n",
    "    \n",
    "    return A,cache\n",
    "\n",
    "#测试\n",
    "print(\"======================测试linear_activation_forward======================\")\n",
    "A_prev,W,b = testCases.linear_activation_forward_test_case()\n",
    "\n",
    "A,linear_activation_cache = linear_activation_forward(A_prev,W,b,activation=\"sigmoid\")\n",
    "print(\"sigmoid, A = \" + str(A))\n",
    "A,linear_activation_cache = linear_activation_forward(A_prev,W,b,activation=\"relu\")\n",
    "print(\"ReLU, A = \" + str(A))"
   ]
  },
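  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "One detail worth making explicit (our addition): in `Z = np.dot(W, A) + b`, the product has shape `(n, m)` while `b` has shape `(n, 1)`, so NumPy broadcasting adds the same bias column to all `m` examples:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Broadcasting sketch (ours): a (2, 1) bias is added to every column of a (2, 3) matrix.\n",
    "WA = np.array([[1., 2., 3.],\n",
    "               [4., 5., 6.]])\n",
    "b_demo = np.array([[10.],\n",
    "                   [20.]])\n",
    "print(WA + b_demo)   # [[11. 12. 13.], [24. 25. 26.]]"
   ]
  },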
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=================测试L_model_forward=================\n",
      "AL = [[0.17007265 0.2524272 ]]\n",
      "caches 的长度是 = 2\n"
     ]
    }
   ],
   "source": [
    "def L_model_forward(X,parameters):\n",
    "    \"\"\"\n",
    "    多层模型的前向传播\n",
    "     实现[LINEAR-> RELU] *（L-1） - > LINEAR-> SIGMOID计算前向传播，\n",
    "     也就是多层网络的前向传播，为后面每一层都执行LINEAR和ACTIVATION\n",
    "    \n",
    "    参数：\n",
    "        X - 数据，numpy数组，维度为（输入节点数量，示例数）\n",
    "        parameters - initialize_parameters_deep（）的输出\n",
    "    \n",
    "    返回：\n",
    "        AL - 最后的激活值\n",
    "        caches - 包含以下内容的缓存列表：\n",
    "                 linear_relu_forward（）的每个cache（有L-1个，索引为从0到L-2）\n",
    "                 linear_sigmoid_forward（）的cache（只有一个，索引为L-1）\n",
    "    \"\"\"\n",
    "    caches = []\n",
    "    A = X\n",
    "    L = len(parameters) // 2   #整除，返回整数\n",
    "    for l in range(1,L):\n",
    "        A_prev = A\n",
    "        A,cache = linear_activation_forward(A_prev,parameters[\"W\" + str(l)], parameters[\"b\" + str(l)], \"relu\")\n",
    "        caches.append(cache)\n",
    "        \n",
    "    AL,cache = linear_activation_forward(A,parameters[\"W\" + str(L)],parameters[\"b\" + str(L)],\"sigmoid\")\n",
    "    caches.append(cache)\n",
    "    \n",
    "    assert(AL.shape == (1,X.shape[1]))\n",
    "    \n",
    "    return AL,caches\n",
    "\n",
    "#测试\n",
    "print(\"=================测试L_model_forward=================\")\n",
    "X,parameters = testCases.L_model_forward_test_case()\n",
    "AL,caches = L_model_forward(X,parameters)\n",
    "print(\"AL = \" + str(AL))\n",
    "print(\"caches 的长度是 = \" + str(len(caches)))"
   ]
  },
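  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To make the `[LINEAR->RELU]*(L-1) -> LINEAR->SIGMOID` flow concrete, here is a small shape walk-through (our addition; the 5-4-3-1 layer sizes are made up):\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run the forward pass on a made-up 5-4-3-1 network with 7 examples and check that\n",
    "# we get one probability per example and one cache per layer.\n",
    "demo_params = initialize_parameters_deep([5, 4, 3, 1])\n",
    "demo_X = np.random.randn(5, 7)\n",
    "demo_AL, demo_caches = L_model_forward(demo_X, demo_params)\n",
    "print(demo_AL.shape)      # (1, 7): one output per example\n",
    "print(len(demo_caches))   # 3: one cache per layer (two relu + one sigmoid)"
   ]
  },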
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=================测试compute_cost===============\n",
      "cost = 0.414931599615397\n"
     ]
    }
   ],
   "source": [
    "def compute_cost(AL,Y):\n",
    "    \"\"\"\n",
    "    计算成本\n",
    "    \n",
    "    参数：\n",
    "        AL - 与标签预测相对应的概率向量，维度为（1，示例数量）\n",
    "        Y - 标签向量（例如：如果不是猫，则为0，如果是猫则为1），维度为（1，数量）\n",
    "\n",
    "    返回：\n",
    "        cost - 交叉熵成本\n",
    "    \"\"\"\n",
    "    m = Y.shape[1]\n",
    "    cost = -np.sum(np.multiply(np.log(AL),Y) + np.multiply(np.log(1-AL),1-Y)) / m\n",
    "    \n",
    "    cost = np.squeeze(cost)\n",
    "    assert(cost.shape == ())\n",
    "    \n",
    "    return cost\n",
    "\n",
    "#测试\n",
    "print(\"=================测试compute_cost===============\")\n",
    "Y,AL = testCases.compute_cost_test_case()\n",
    "print(\"cost = \" + str(compute_cost(AL,Y)))"
   ]
  },
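  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A numerical-stability footnote (our addition, not used by the exercise code): `np.log(AL)` returns `-inf` when a predicted probability hits exactly 0 or 1, which can happen late in training. A common safeguard is to clip the probabilities first; `compute_cost_stable` below is our own sketch of that variant, with an assumed epsilon:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def compute_cost_stable(AL, Y, eps=1e-12):\n",
    "    \"\"\"Cross-entropy cost with the probabilities clipped away from 0 and 1.\"\"\"\n",
    "    m = Y.shape[1]\n",
    "    AL = np.clip(AL, eps, 1 - eps)   # keep log() finite at the boundaries\n",
    "    cost = -np.sum(Y * np.log(AL) + (1 - Y) * np.log(1 - AL)) / m\n",
    "    return np.squeeze(cost)"
   ]
  },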
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "====================测试linear_backward====================\n",
      "dA_prev = [[ 0.51822968 -0.19517421]\n",
      " [-0.40506361  0.15255393]\n",
      " [ 2.37496825 -0.89445391]]\n",
      "dW = [[-0.10076895  1.40685096  1.64992505]]\n",
      "db = [[0.50629448]]\n",
      "===================测试linear_activation_backward======================\n",
      "sigmoid: \n",
      "dA_prev = [[ 0.11017994  0.01105339]\n",
      " [ 0.09466817  0.00949723]\n",
      " [-0.05743092 -0.00576154]]\n",
      "dW = [[ 0.10266786  0.09778551 -0.01968084]]\n",
      "db = [[-0.05729622]]\n",
      "\n",
      "relu: \n",
      "dA_prev = [[ 0.44090989 -0.        ]\n",
      " [ 0.37883606 -0.        ]\n",
      " [-0.2298228   0.        ]]\n",
      "dW = [[ 0.44513824  0.37371418 -0.10478989]]\n",
      "db = [[-0.20837892]]\n",
      "\n"
     ]
    }
   ],
   "source": [
    "def linear_backward(dZ,cache):\n",
    "    \"\"\"\n",
    "    两层模型的后向传播\n",
    "     为单层实现后向传播的线性部分（第L层）\n",
    "\n",
    "    参数：\n",
    "         dZ - 相对于（当前第l层的）线性输出的成本梯度\n",
    "         cache - 来自当前层前向传播的值的元组（A_prev，W，b）\n",
    "\n",
    "    返回：\n",
    "         dA_prev - 相对于激活（前一层l-1）的成本梯度，与A_prev维度相同\n",
    "         dW - 相对于W（当前层l）的成本梯度，与W的维度相同\n",
    "         db - 相对于b（当前层l）的成本梯度，与b维度相同\n",
    "    \"\"\"\n",
    "    A_prev,W,b = cache\n",
    "    m = A_prev.shape[1]\n",
    "    dW = np.dot(dZ,A_prev.T) / m\n",
    "    db = np.sum(dZ,axis=1,keepdims=True) / m\n",
    "    dA_prev = np.dot(W.T,dZ)\n",
    "    \n",
    "    assert(dA_prev.shape == A_prev.shape)\n",
    "    assert(dW.shape == W.shape)\n",
    "    assert(db.shape == b.shape)\n",
    "    \n",
    "    return dA_prev,dW,db\n",
    "\n",
    "#测试\n",
    "print(\"====================测试linear_backward====================\")\n",
    "dZ,linear_cache = testCases.linear_backward_test_case()\n",
    "\n",
    "dA_prev,dW,db = linear_backward(dZ,linear_cache)\n",
    "print(\"dA_prev = \" + str(dA_prev))\n",
    "print(\"dW = \" + str(dW))\n",
    "print(\"db = \" + str(db))\n",
    "\n",
    "def linear_activation_backward(dA,cache,activation=\"relu\"):\n",
    "    \"\"\"\n",
    "    两层模型的后向传播\n",
    "     实现LINEAR-> ACTIVATION层的后向传播。\n",
    "    \n",
    "    参数：\n",
    "         dA - 当前层l的激活后的梯度值\n",
    "         cache - 存储用于有效计算反向传播的值的元组（值为linear_cache，activation_cache）\n",
    "         activation - 要在此层中使用的激活函数名，字符串类型，[\"sigmoid\" | \"relu\"]\n",
    "         \n",
    "    返回：\n",
    "         dA_prev - 相对于激活（前一层l-1）的成本梯度值，与A_prev维度相同\n",
    "         dW - 相对于W（当前层l）的成本梯度值，与W的维度相同\n",
    "         db - 相对于b（当前层l）的成本梯度值，与b的维度相同\n",
    "    \"\"\"\n",
    "    \n",
    "    linear_cache, activation_cache = cache\n",
    "    if activation == \"relu\":\n",
    "        dZ = relu_backward(dA,activation_cache)\n",
    "        dA_prev,dW,db = linear_backward(dZ, linear_cache)\n",
    "    elif activation == \"sigmoid\":\n",
    "        dZ = sigmoid_backward(dA, activation_cache)\n",
    "        dA_prev,dW,db = linear_backward(dZ,linear_cache)\n",
    "        \n",
    "    return dA_prev,dW,db\n",
    "\n",
    "#测试\n",
    "print(\"===================测试linear_activation_backward======================\")\n",
    "AL,linear_activation_cache = testCases.linear_activation_backward_test_case()\n",
    "\n",
    "dA_prev,dW,db = linear_activation_backward(AL,linear_activation_cache,activation=\"sigmoid\")\n",
    "print(\"sigmoid: \")\n",
    "print(\"dA_prev = \" + str(dA_prev))\n",
    "print(\"dW = \" + str(dW))\n",
    "print(\"db = \" + str(db) + \"\\n\")\n",
    "\n",
    "dA_prev,dW,db = linear_activation_backward(AL,linear_activation_cache,activation=\"relu\")\n",
    "print(\"relu: \")\n",
    "print(\"dA_prev = \" + str(dA_prev))\n",
    "print(\"dW = \" + str(dW))\n",
    "print(\"db = \" + str(db) + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================测试L_model_backward================\n",
      "dW1 = [[0.41010002 0.07807203 0.13798444 0.10502167]\n",
      " [0.         0.         0.         0.        ]\n",
      " [0.05283652 0.01005865 0.01777766 0.0135308 ]]\n",
      "db1 = [[-0.22007063]\n",
      " [ 0.        ]\n",
      " [-0.02835349]]\n",
      "dA1 = [[ 0.          0.52257901]\n",
      " [ 0.         -0.3269206 ]\n",
      " [ 0.         -0.32070404]\n",
      " [ 0.         -0.74079187]]\n"
     ]
    }
   ],
   "source": [
    "def L_model_backward(AL,Y,caches):\n",
    "    \"\"\"\n",
    "    多层模型后向传播\n",
    "     对[LINEAR-> RELU] *（L-1） - > LINEAR - > SIGMOID组执行反向传播，就是多层网络的向后传播\n",
    "    \n",
    "    参数：\n",
    "        AL - 概率向量，正向传播的输出（L_model_forward（））\n",
    "        Y - 标签向量（例如：如果不是猫，则为0，如果是猫则为1），维度为（1，数量）\n",
    "        caches - 包含以下内容的cache列表：\n",
    "                 linear_activation_forward（\"relu\"）的cache，不包含输出层\n",
    "                 linear_activation_forward（\"sigmoid\"）的cache\n",
    "    \n",
    "    返回：\n",
    "        grads - 具有梯度值的字典\n",
    "              grads [“dA”+ str（l）] = ...\n",
    "              grads [“dW”+ str（l）] = ...\n",
    "              grads [“db”+ str（l）] = ...\n",
    "    \"\"\"\n",
    "    grads = {}\n",
    "    L = len(caches)\n",
    "    m = AL.shape[1]\n",
    "    Y = Y.reshape(AL.shape)\n",
    "    dAL = -(np.divide(Y,AL) - np.divide(1-Y, 1-AL))\n",
    "    \n",
    "    current_cache = caches[L-1]\n",
    "    grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dAL, current_cache, \"sigmoid\")\n",
    "    \n",
    "    for l in reversed(range(L-1)):\n",
    "        current_cache = caches[l]\n",
    "        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 2)], current_cache, \"relu\")\n",
    "        grads[\"dA\" + str(l + 1)] = dA_prev_temp\n",
    "        grads[\"dW\" + str(l + 1)] = dW_temp\n",
    "        grads[\"db\" + str(l + 1)] = db_temp\n",
    "        \n",
    "    return grads\n",
    "\n",
    "#测试\n",
    "print(\"================测试L_model_backward================\")\n",
    "AL,Y_assess,caches = testCases.L_model_backward_test_case()\n",
    "grads = L_model_backward(AL, Y_assess, caches)\n",
    "print(\"dW1 = \" + str(grads[\"dW1\"]))\n",
    "print(\"db1 = \" + str(grads[\"db1\"]))\n",
    "print(\"dA1 = \" + str(grads[\"dA1\"]))"
   ]
  },
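  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before trusting a hand-written backward pass it is common to compare it against numerical gradients. The cell below is our own minimal finite-difference check (not part of the original exercise): it perturbs one weight by a tiny epsilon and compares the numeric slope of the cost against the analytic `dW1` entry; the two should agree to several decimal places.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal gradient check (ours): analytic dW1[0, 0] vs. a centered finite difference.\n",
    "np.random.seed(2)\n",
    "Xc = np.random.randn(4, 5)\n",
    "Yc = (np.random.rand(1, 5) > 0.5).astype(float)\n",
    "params_c = initialize_parameters_deep([4, 3, 1])\n",
    "\n",
    "def cost_at(params):\n",
    "    ALc, _ = L_model_forward(Xc, params)\n",
    "    return compute_cost(ALc, Yc)\n",
    "\n",
    "ALc, caches_c = L_model_forward(Xc, params_c)\n",
    "grads_c = L_model_backward(ALc, Yc, caches_c)\n",
    "\n",
    "eps = 1e-7\n",
    "plus = {k: v.copy() for k, v in params_c.items()}\n",
    "minus = {k: v.copy() for k, v in params_c.items()}\n",
    "plus[\"W1\"][0, 0] += eps\n",
    "minus[\"W1\"][0, 0] -= eps\n",
    "numeric = (cost_at(plus) - cost_at(minus)) / (2 * eps)\n",
    "print(\"analytic:\", grads_c[\"dW1\"][0, 0], \" numeric:\", numeric)"
   ]
  },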
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=================测试update_parameters==================\n",
      "W1 = [[-0.59562069 -0.09991781 -2.14584584  1.82662008]\n",
      " [-1.76569676 -0.80627147  0.51115557 -1.18258802]\n",
      " [-1.0535704  -0.86128581  0.68284052  2.20374577]]\n",
      "b1 = [[-0.04659241]\n",
      " [-1.28888275]\n",
      " [ 0.53405496]]\n",
      "W2 = [[-0.55569196  0.0354055   1.32964895]]\n",
      "b2 = [[-0.84610769]]\n"
     ]
    }
   ],
   "source": [
    "def update_parameters(parameters, grads, learning_rate):\n",
    "    \"\"\"\n",
    "    使用梯度下降更新参数\n",
    "    W[l]=W[l]−α dW[l]\n",
    "    b[l]=b[l]−α db[l]\n",
    "    \n",
    "    参数：\n",
    "     parameters - 包含你的参数的字典\n",
    "     grads - 包含梯度值的字典，是L_model_backward的输出\n",
    "    \n",
    "    返回：\n",
    "     parameters - 包含更新参数的字典\n",
    "                   参数[“W”+ str（l）] = ...\n",
    "                   参数[“b”+ str（l）] = ...\n",
    "    \"\"\"\n",
    "    \n",
    "    L = len(parameters) // 2\n",
    "    for l in range(L):\n",
    "        parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n",
    "        parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n",
    "        \n",
    "    return parameters\n",
    "\n",
    "#测试\n",
    "print(\"=================测试update_parameters==================\")\n",
    "parameters, grads = testCases.update_parameters_test_case()\n",
    "parameters = update_parameters(parameters,grads,0.1)\n",
    "\n",
    "print(\"W1 = \" + str(parameters[\"W1\"]))\n",
    "print(\"b1 = \" + str(parameters[\"b1\"]))\n",
    "print(\"W2 = \" + str(parameters[\"W2\"]))\n",
    "print(\"b2 = \" + str(parameters[\"b2\"]))"
   ]
  },
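  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The update rule is easy to verify by hand (our addition, with made-up numbers): with `learning_rate = 0.1`, a weight at `2.0` whose gradient is `0.5` moves to `2.0 - 0.1 * 0.5 = 1.95`, i.e. against the gradient:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One hand-checked gradient-descent step (ours) on a one-parameter \"network\".\n",
    "toy_params = {\"W1\": np.array([[2.0]]), \"b1\": np.array([[0.0]])}\n",
    "toy_grads = {\"dW1\": np.array([[0.5]]), \"db1\": np.array([[-1.0]])}\n",
    "print(update_parameters(toy_params, toy_grads, 0.1))\n",
    "# expected: W1 -> [[1.95]], b1 -> [[0.1]]"
   ]
  },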
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第  0  次迭代，成本值为：  0.6930497356599891\n",
      "第  100  次迭代，成本值为：  0.6464320953428849\n",
      "第  200  次迭代，成本值为：  0.6325140647912677\n",
      "第  300  次迭代，成本值为：  0.6015024920354665\n",
      "第  400  次迭代，成本值为：  0.5601966311605748\n",
      "第  500  次迭代，成本值为：  0.515830477276473\n",
      "第  600  次迭代，成本值为：  0.47549013139433266\n",
      "第  700  次迭代，成本值为：  0.43391631512257495\n",
      "第  800  次迭代，成本值为：  0.400797753620389\n",
      "第  900  次迭代，成本值为：  0.3580705011323798\n",
      "第  1000  次迭代，成本值为：  0.3394281538366412\n",
      "第  1100  次迭代，成本值为：  0.3052753636196264\n",
      "第  1200  次迭代，成本值为：  0.2749137728213017\n",
      "第  1300  次迭代，成本值为：  0.2468176821061485\n",
      "第  1400  次迭代，成本值为：  0.19850735037466108\n",
      "第  1500  次迭代，成本值为：  0.174483181125566\n",
      "第  1600  次迭代，成本值为：  0.17080762978096897\n",
      "第  1700  次迭代，成本值为：  0.11306524562164709\n",
      "第  1800  次迭代，成本值为：  0.09629426845937147\n",
      "第  1900  次迭代，成本值为：  0.08342617959726864\n",
      "第  2000  次迭代，成本值为：  0.07439078704319083\n",
      "第  2100  次迭代，成本值为：  0.06630748132267932\n",
      "第  2200  次迭代，成本值为：  0.05919329501038171\n",
      "第  2300  次迭代，成本值为：  0.05336140348560558\n",
      "第  2400  次迭代，成本值为：  0.0485547856287702\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYUAAAEWCAYAAACJ0YulAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xl8VPW9//HXJwlZCCGBBFnCvmkBWSSAIlrrVau2irvgiijWBbfe9v6st7e19na5tq4VW6EiqCha64JL61Z3ZQmyCcgqS1jDFvYl8Pn9MSfpGCcQMJOTZN7Px2MemXPO95z5nBmY95zte8zdERERAUgKuwAREak9FAoiIlJOoSAiIuUUCiIiUk6hICIi5RQKIiJSTqEg34qZLTOz00J67e1m1jGM1xaprxQKUme5eyN3Xxp2HQBm5mbWOYTX7W1m081sZ/C390HaNjWzl8xsh5ktN7PLKky/LBi/w8xeNrOmUdO2V3jsN7M/BdPaB+sfPf1/4rfWEk8KBamVzCw57BrKmFlK2DXEYmapwCvA00ATYDzwSjA+llHAXqA5cDnwZzPrHiyrO/AYcGUwfSfwaNmMQQA3cvdGwfRdwN8qLD8nqt2vq2k1pYYpFKTamFmSmd1pZkvMbKOZPV/h1+bfzGytmZWY2YdlX0jBtHFm9mcze8PMdgDfC8aNMrPXzWybmU0xs05R85T/Oq9C2zPMbEHw2o+a2Qdmdl0l63G3mb1gZk+b2VZgmJn1N7PPzGyLma0xs0fKvnzN7MNg1lnBr+RLg/E/NLOZwTyfmlnPany7AU4BUoAH3X2Puz8MGHBqjHXKBC4E/sfdt7v7x8AkIiEAkZB41d0/dPftwP8AF5hZVozXvQhYD3xUzesjtYBCQarTrcB5wHeBVsBmIr9Oy/wD6AIcBXwOTKgw/2XAb4As4ONg3FDgV0R+CS8OplcmZlszywNeAH4G5AILgIGHWJfBwTw5QZ37gTuAPOAE4D+AmwDc/eRgnl7Br+TnzOw4YCzwo+A1HwMmmVlarBczs9lBeMR6PBprHqA7MNu/3lfN7GB8RV2B/e6+MGrcrKi23YNhgnVaQmSromuMZV0NPOnf7CNnuZkVmdkTwXsudZBCQarTj4D/dvcid98D3A1cVLb7xd3Huvu2qGm9zCw7av5X3P0Tdz/g7ruDcS+6+1R3LyXy5VzpPvODtD0bmOvuLwbTHgbWHmJdPnP3l4Nadrn7dHef7O6l7r6MyJf8dw8y/wjgMXef4u773X08sAc4PlZjd+/p7jmVPG6q5DUaASUVxpUQCdXDbVulZZlZWyLrPT5q9AagH9AO6BvMUzHwpY6olftKpc5qB7xkZgeixu0HmpvZWiK/3C8GmgFlbfL495fRyhjLjP7y3knky6sylbVtFb1sd3czKzr4qny9FjPrCtwPFAANifzfmX6Q+dsBV5vZLVHjUoNaqst2oHGFcY2BbUfQtqrLugr42N2/KhsR7G4qDAbXmdlIYI2ZNXb3rVVZEak9tKUg1WklcFaFX7np7r6KyK6hwcBpQDbQPpjHouaPV5e9a4DWZQNmZtHDlahYy5+BL4Eu7t4YuIuv117RSuA3Fd6Lhu7+bKzGZjY3xhk+ZY+/VPIac4GewfqU6RmMr2ghkGJmXaLG9YpqOzcYLqunI5AWzBftKr6+lRBL2Xt3sPdHaimFglSnvwC/MbN2AGbWzMwGB9OyiOw+2Ujkl/Zva7Cu14Fjzey8YFfWzUCLw1xGFrAV2G5mxwA3Vpi+Doi+ZmIMcIOZDbCITDP7QSUHbnH37tFn+FR43FBJTe8T2RK71czSgl/oAP+KsfwdwIvAPUEtJxIJ6aeCJhOAc8zspOCg9D1EdseVbymY2UAgnwpnHQXreHRwokEukd1z77t7xd1RUgcoFKQ6PUTkjJa3zGwbMBkYEEx7ElgOrALmBdNqhLtvILLb6l4iodSNyO6OPYexmJ8Q2drZRuQL/7kK0+8GxgcHhi9x90IixxUeIXLAfTEw7MjX4pvcfS+RA/tXAVuA4cB5wXjM7C4z+0fULDcBGUTOHHoWuNHd5wbLmgvcQCQc1hMJwYrHMq6mQlAEOgL/JPLefEHkfR1aTaspNcx0kx1JNGaWBBQBl7v7e2HXI1KbaEtBEoKZfd/McoJTQsuOB9TY1opIXaFQkERxArCEyOmT5xDZzbIr3JJEah/tPhIRkXLaUhARkXJ17uK1vLw8b9++fdhliIjUKdOnT9/g7s0O1a7OhUL79u0pLCw8dEMRESlnZsur0k67j0REpJxCQUREyikURESkXFxDwczODG5sstjM7owx/YHgJiQzzWyhmW2JZz0iInJwcTvQbJHbKY4CTifSpcA0M5vk7vPK2rj7HVHtbwH6xKseERE5tHhuKfQHFrv70qCDrolEemWszFAinXSJiEhI4hkK+Xz9RiVFwbhvCLpa7kCMLn+D6debWaGZFRYXF1d7oSIiEhHPUIh1g43K+tQYArzg7vtjTXT30e5e4O4FzZod8tqLmL7asIP/++eXqFsPEZHKxTMUioA2UcOtgdWVtB1CnHcdvT1vLX9+fwl/eHNBPF9GRKROi+cVzdOALmbWgciNVYYQuUnJ15jZ0UAT4LM41sKIkzry1YadPPr+ElrmZHDl8e3i+XIiInVS3ELB3UuD2wO+CSQDY919rpndAxS6+6Sg6VBgosd5v46Z8evB3SnetptfvvIFzbPSOKP74d6RUUSkfqtzXWcXFBT4t+n7aOfeUoaOmcKXa7byzIjj6duuSTVWJyJSO5nZdHcvOFS7hLuiuWFqCmOvLqBldjrXjZ/G0uLtYZckIlJrJFwoAOQ2SmP88P4kmXH1E1NZv2132CWJiNQKCRkKAO1yMxk7rB8btu3l2nGF7NhTGnZJIiKhS9hQAOjVJodRl/dh3pqt3DThc/btPxB2SSIioUroUAA49Zjm/Oa8HnywsJi7Xpyji9tEJKHVuTuvxcOQ/m1ZXbKbh99dRMucDH58etewSxIRCYVCIXDHaV1YW7IrEgzZ6Qzt3zbskkREapxCIWBm/Ob8Y1m3dQ8/f/kLmjdO49RjmoddlohIjUr4YwrRGiQn8ejlx9GtZWNunjCDWSt1zx8RSSwKhQoy01IYO6wfzbLSGD5uGkt0cZuIJBCFQgzNstIYd00/Drhz+v0fcNmYyUyYspyN2/eEXZqISFwlXN9Hh2Plpp08X7iS12av4asNO0hOMgZ2yuUHx7bk+91b0CQztUbqEBH5tqra95FCoQrcnflrtvH6nNW8NnsNyzfuJCXJGNg5jx/2bMn3u7Ugu2GDGq1JRORwKBTixN2Zu3orr81ew+tzVrNy0y4aJBuDOufxg56tOL1bc7IzFBAiUrsoFGqAuzNnVQmvz17Da7PXsGrLLlKTk7jpe5245dQuJCfFuiOpiEjNUyjUMHdnVlEJj3/8Fa/OWs2JnXN58NI+NMtKC7s0ERHdT6GmmRm92+Tw8JDe3HtRT6Yv38zZD3/Ep4s3hF2aiEiVKRS
qmZlxSUEbXrl5EI3TU7j88Sk8+M5C9h+oW1tkIpKYFApxcnSLLCaNHMT5vfN58J1FXDV2CsXbdJ2DiNRuCoU4ykxL4b5Leml3kojUGQqFONPuJBGpSxQKNUS7k0SkLohrKJjZmWa2wMwWm9mdlbS5xMzmmdlcM3smnvWETbuTRKS2i1somFkyMAo4C+gGDDWzbhXadAF+Bpzo7t2B2+NVT20Ra3fSQ+8s0m1ARaRWiOeWQn9gsbsvdfe9wERgcIU2I4BR7r4ZwN3Xx7GeWiV6d9ID7yzk7klzFQwiErp43nktH1gZNVwEDKjQpiuAmX0CJAN3u/s/Ky7IzK4Hrgdo27b+3CazbHdSXlYaoz9cCsDd53bHTN1jiEg44hkKsb7ZKv4UTgG6AKcArYGPzKyHu3/tlmfuPhoYDZFuLqq/1PCYGT876xgABYOIhC6eoVAEtIkabg2sjtFmsrvvA74yswVEQmJaHOuqdSoGgwO/UjCISAjiGQrTgC5m1gFYBQwBLqvQ5mVgKDDOzPKI7E5aGseaaq1YWwwKBhGpaXELBXcvNbORwJtEjheMdfe5ZnYPUOjuk4JpZ5jZPGA/8FN33xivmmq7smAw4DEFg4iEIJ5bCrj7G8AbFcb9Iuq5Az8OHkIkGO4MthgUDCJS0+IaCnJkKgaDO9wzWMEgIvGnUKilYm0xKBhEJN4UCrVYeTAYPPaBgkFE4k+hUMuZGXeeGWwxfLAUx/n14B4KBhGJC4VCHVAxGAAFg4jEhUKhjqgYDElmOitJRKqdQqEOKQsG98gFblnpKfz0+8eEXZaI1CMKhTqm7AK3bbtLGfXeErIzGnD9yZ3CLktE6gmFQh1kZvzveT3Yunsfv33jS7IzGnBpv/rTe6yIhEehUEclJxkPXNKb7btL+dmLc8hKb8DZx7YMuywRqeN0j+Y6LDUlib9c0Zfj2jbhtokz+HBhcdgliUgdp1Co4zJSk3l8WD86H5XFj56azvTlm8IuSUTqMIVCPZCd0YAnh/enRXY61zwxjflrtoZdkojUUQqFeqJZVhpPXdufhqkpXPn4VJZt2BF2SSJSBykU6pHWTRry9HX9OeDOFY9PYW3J7rBLEpE6RqFQz3Q+Kovx1/Rny859XPn4FDbv2Bt2SSJShygU6qFjW2cz5qoClm/aybAnprJ9T2nYJYlIHaFQqKdO6JTLo5cdxxertzJifCG79+0PuyQRqQMUCvXYad2a88eLe/LZ0o3c8uwMSvcfCLskEanlFAr13Pl9WnPP4O68PW8dtz03k30KBhE5CHVzkQCuOqE9u/ft57dvfEnp/gP8aehxpKbo94CIfFNcvxnM7EwzW2Bmi83szhjTh5lZsZnNDB7XxbOeRHb9yZ345TndeHPuOm58ejp7SnWMQUS+KW6hYGbJwCjgLKAbMNTMusVo+py79w4ef41XPQLXnNiB/z2vB+9+uZ4RT07XwWcR+YZ4bin0Bxa7+1J33wtMBAbH8fWkCq44vh33XtiTjxYVM3zcNHbu1emqIvJv8QyFfGBl1HBRMK6iC81stpm9YGZtYi3IzK43s0IzKywuVk+g39Yl/dpw38W9mLx0I8OemKbrGESkXDxDIdbNg73C8KtAe3fvCbwDjI+1IHcf7e4F7l7QrFmzai4zMV1wXGseHNKH6cs3c/XYqWzdvS/skkSkFohnKBQB0b/8WwOroxu4+0Z33xMMjgH6xrEeqeDcXq14ZGgfZq3cwpWPT6Vkp4JBJNHFMxSmAV3MrIOZpQJDgEnRDcws+lZh5wLz41iPxHDWsS358xV9mb96K5f9dbL6ShJJcHELBXcvBUYCbxL5sn/e3eea2T1mdm7Q7FYzm2tms4BbgWHxqkcqd3q35jx2VV8Wrd/O0DGT2bB9z6FnEpF6ydwr7uav3QoKCrywsDDsMuqljxdt4Lonp9GmSUMmjBjAUVnpYZckItXEzKa7e8Gh2umyVik3qEse467pz6otuxjy2GTdj0EkASkU5GuO75jLk8P7s37bHi4d/Rmrt+wKuyQRqUEKBfmGgvZNeera/mzavpehYyazpkTBIJIoFAoSU5+2TXiyLBhGa1eSSKJQKEil+rRtwvhr+7Mh2GJQMIjUfwoFOajj2jZh/PD+FG/bw9Axk1m3VcEgUp8pFOSQ+rZrwvjh/Vi/dTdDRysYROozhYJUSd92TRk/vD/rtu5m6JjJrFcwiNRLCgWpsoL2TRk3vD9rS4Jg2KZgEKlvFApyWPq1j2wxrCmJ7Eoq3qYuMUTqE4WCHLZ+7Zsy7pogGMYoGETqE4WCHJH+HZryxLB+rNq8i8sUDCL1hkJBjtiAjrk8cU0/ijbv4vK/qndVkfpAoSDfyvEdcxk7rB8rNu3k8jFT2KhgEKnTFAryrZ3QKRIMyzft4OLHPmNJ8fawSxKRI6RQkGoxsFMeTw4fwJad+zjvkU94d/66sEsSkSOgUJBq079DUyaNPJF2eQ257slCHn53EQcO1K2bOIkkOoWCVKvWTRrywg0DOa93Pve/vZAbnp7O9j2lYZclIlWkUJBql94gmfsv6cUvftiNd79cz3mjPmGpjjOI1AkKBYkLM2P4oA6Rm/Xs2MvgUZ/wry91nEGktlMoSFwN7JTHpJEn0rZpQ64dX8ifdJxBpFZTKEjclR1nGNyrFfe9vZAbJ+g4g0htFddQMLMzzWyBmS02szsP0u4iM3MzK4hnPRKejNRkHri0Nz//wXd4Z/56zh/1CV9t2BF2WSJSQdxCwcySgVHAWUA3YKiZdYvRLgu4FZgSr1qkdjAzrjupI08N78+G7Xs495GPee/L9WGXJSJR4rml0B9Y7O5L3X0vMBEYHKPdr4F7AXXOnyAGds5j0shBtGnSkOHjp/HkZ8vCLklEAvEMhXxgZdRwUTCunJn1Adq4+2sHW5CZXW9mhWZWWFxcXP2VSo1r07Qhf79xIKd9pzm/eGUuz05dEXZJIkIVQ8HMLq7KuIpNYowrP+3EzJKAB4D/PNTru/tody9w94JmzZodqrnUERmpyTxyWR++d3Qz7nppDn+fXhR2SSIJr6pbCj+r4rhoRUCbqOHWwOqo4SygB/C+mS0Djgcm6WBzYklLSebPV/RlYKdcfvrCLF6dtfrQM4lI3KQcbKKZnQWcDeSb2cNRkxoDhzqncBrQxcw6AKuAIcBlZRPdvQTIi3qt94GfuHvh4ayA1H3pDZIZc1UBw8ZO4/bnZpKaksT3u7cIuyyRhHSoLYXVQCGRg8DTox6TgO8fbEZ3LwVGAm8C84Hn3X2umd1jZud+28KlfmmYmsLYa/rRs3U2I5/5XGcliYTE3A99damZNXD3fcHzJkQODs+Od3GxFBQUeGGhNibqq5Jd+7j8r5NZuG47Twzrx4md8w49k4gckplNd/dD7p6v6jGFt82ssZk1BWYBT5jZ/d+qQpEYsjMa8NTwAXTMy+Ta8dOYsnRj2CWJJJSqhkK2u28FLgCecPe+wGnxK0sSWZPMVJ66dgD5ORkMHzeNz1dsDrskkYRR1VBIMbOWwCXAQa8pEKkOzbLSeGbE8eRlpXH12Kl8sa
ok7JJEEkJVQ+EeIgeMl7j7NDPrCCyKX1ki0LxxOs+MOJ7G6Q244vEpfLl2a9glidR7VQoFd/+bu/d09xuD4aXufmF8SxOB/JwMnhkxgPSUZC4fM4XF67eFXZJIvVbVK5pbm9lLZrbezNaZ2d/NrHW8ixMBaJebyYQRAzAzLhszhWXqXVUkbqq6++gJItcmtCLSf9GrwTiRGtGpWSOeGTGA0gPO0DGTeXnGKvbtPxB2WSL1TlVDoZm7P+HupcFjHKBOiKRGdW2exVPX9iczLYXbn5vJyfe+x2MfLGHr7n1hlyZSb1Q1FDaY2RVmlhw8rgB0ArnUuO6tsnnr9pN5Ylg/2udm8rt/fMnA3/2LX782j6LNO8MuT6TOq+oVzW2BR4ATiPR0+ilwq7vXeH/HuqJZon2xqoS/frSUV2evAeDsY1sy4qQO9GydE3JlIrVLVa9ormoojAdud/fNwXBT4I/uPvxbV3qYFAoSy+otuxj36TKenbKCbXtKGdChKSNO6sipxxxFUlKsXtxFEkt1h8IMd+9zqHE1QaEgB7Nt9z6em7aSsR9/xeqS3XRslsmIkzpyfp980hskh12eSGiqu++jpKAjvLKFN+UQ3W6LhCErvQHXndSRD/7rezw0pDcNU5P52YtzOP2BD3Txm0gVVDUU7gM+NbNfm9k9RI4p3Bu/skS+nQbJSQzunc+rIwfx9LUD2LPvABc++ilvzV0bdmkitVpVr2h+ErgQWAcUAxe4+1PxLEykOpgZg7rk8eotg+h8VCN+9PR0Rr23mKrsNhVJRFXeBeTu84B5caxFJG6aN07nuR+dwH+9MJs/vLmAheu28X8X9tRxBpEKdFxAEkZ6g2QeGtKbo1tk8Yc3F7Bsww5GX1VA88bpYZcmUmtU9ZiCSL1gZtz8vc6MvrIvi9Zv59xHPmZ20ZawyxKpNRQKkpDO6N6Cv984kJSkJC7+y2dMmrU67JJEagWFgiSs77RszKSRJ9KrdQ63PjuDP765gAMHdABaEptCQRJabqM0nr5uAEP6teGR9xZzw9PT2bGnNOyyREKjUJCEl5qSxO8uOJZfntONd+av48I/f8rKTepcTxJTXEPBzM40swVmttjM7owx/QYzm2NmM83sYzPrFs96RCpjZlxzYgfGXdOfVVt2MXjUJ3y2RB0BS+KJWyiYWTIwCjgL6AYMjfGl/4y7H+vuvYlcIX1/vOoRqYqTuzbjlZtPJKdhAy7/62QeeHsh+3WcQRJIPLcU+gOLg/s57wUmAoOjG7h7dGc0mUS65RYJVcdmjXh15CDO79Oah95dxNAxk1lTsivsskRqRDxDIR9YGTVcFIz7GjO72cyWENlSuDXWgszsejMrNLPC4uLiuBQrEi0zLYX7LunF/Zf04otVJZz10Ee8M29d2GWJxF08QyFWJ/bf2BJw91Hu3gn4f8DPYy3I3Ue7e4G7FzRrpruASs254LjWvHbLIPJzMrjuyUJ+9epc9pTuD7sskbiJZygUAW2ihlsDB7tCaCJwXhzrETkiHZs14sWbBnLNie154pNlXPDop3y1YUfYZYnERTxDYRrQxcw6mFkqMASYFN3AzLpEDf4AWBTHekSOWFpKMr88pztjripg1ZZd/PDhj3hpRlHYZYlUu7iFgruXAiOBN4H5wPPuPtfM7jGzc4NmI81srpnNBH4MXB2vekSqw+ndmvOP206ie6ts7nhuFv/5/Cxd7Cb1SpVux1mb6HacUhuU7j/An/61mD/9axHtczP502V96N4qO+yyRCpV3bfjFJEoKclJ3HF6VyZcdzw79pZy/qhPGf/pMt28R+o8hYLIt3BCp1z+cdvJDOqSxy8nzeXqJ6axtmR32GWJHDGFgsi31DQzlcevLuDXg7sz7atNnPHAB7w0o0hbDVInKRREqoGZceUJ7fnHbSdxdIss7nhuFjc8PZ0N2/eEXZrIYVEoiFSj9nmZTLz+BP777O/w3oJiznjgQ/75xZqwyxKpMoWCSDVLTjJGnNyR14MroW94+nNunziDkp37wi5N5JAUCiJx0qV5Fi/eNJA7TuvKa7PXcMaDH/DegvVhlyVyUAoFkThqkJzEbad14eWbTyQ7owHXPDGNO/8+m227tdUgtZNCQaQG9MjP5tVbBnHjKZ14vnAlZz74EZ8u2RB2WSLfoFAQqSFpKcn8vzOP4W83DCQ1JYnLxkzhd2/M16mrUqsoFERqWN92TXjj1pMY2r8tj324lFHvLQ67JJFyKWEXIJKIMlKT+e35Pdi1t5Q/vrWQdrmZnNOrVdhliWhLQSQsZsb/XdSTfu2b8J9/m8X05ZvCLklEoSASprSUZEZfWUCr7HRGPDmdFRt3hl2SJDiFgkjImmSmMnZYPw64c824qbrITUKlUBCpBTo2a8RjV/Rlxaad3DhhOntLD4RdkiQohYJILTGgYy6/v6Anny7ZyM9fnqNTVSUUOvtIpBa5sG9rlm/cwcP/Wkz7vExuOqVz2CVJglEoiNQyd5zelWUbd3LvPxfQrmkmP+jZMuySJIFo95FILWNm3HtRTwraNeHHz8/k8xWbwy5JEohCQaQWSm+QzGNX9qV543RGjC9k5Sadqio1I66hYGZnmtkCM1tsZnfGmP5jM5tnZrPN7F0zaxfPekTqktxGaYwd1o99+w9wzbhplOzSqaoSf3ELBTNLBkYBZwHdgKFm1q1CsxlAgbv3BF4A7o1XPSJ1UeejGvHYlQUs37iDmyZMZ99+naoq8RXPLYX+wGJ3X+rue4GJwODoBu7+nruXbRdPBlrHsR6ROumETrn87oKefLJ4I//z8hc6VVXiKp5nH+UDK6OGi4ABB2l/LfCPWBPM7HrgeoC2bdtWV30idcZFfVuzbMMOHnlvMclJxn+deQzZGQ3CLkvqoXiGgsUYF/MnjpldARQA34013d1HA6MBCgoK9DNJEtKPT+/Kzr37eeLTr/jHF2v5yRlHc2m/NiQnxfqvJnJk4rn7qAhoEzXcGlhdsZGZnQb8N3Cuu++JYz0idVpSkvGLc7rx6shBdG7WiLtemsO5j3zM1K/Uu6pUn3iGwjSgi5l1MLNUYAgwKbqBmfUBHiMSCLqjuUgV9MjP5rkfHc+fhvZh8469XPLYZ9zy7AxWb9kVdmlSD8QtFNy9FBgJvAnMB55397lmdo+ZnRs0+wPQCPibmc00s0mVLE5EopgZ5/Rqxbv/eQq3/kcX3pq7llPve5+H3lnE7n37wy5P6jCra2cyFBQUeGFhYdhliNQqRZt38rs3vuT1OWvIz8ngrrO/w9nHtsBMxxskwsymu3vBodrpimaReqB1k4aMuvw4Jl5/PI0zGnDzM58zdMxk5q/ZGnZpUsdoS0Gkntl/wHl26grue2sBJbv2cWm/NhzXtgl5WWnkZaaRl5VKbmYaqSn6TZhIqrqloFAQqadKdu7jgXcW8vTk5ZQe+Ob/8+yMBuQ2SiWvURrNGqWVP89rlEavNtl0b5UdQtUSLwoFEQFg1979FG/bQ/H2PWzcvocN2/eyYfseNmzfw8bteykOnm/Ytoetu0sBSEkyXrxpID1b54RcvVSXq
oaC7qcgUs9lpCbTNrchbXMbHrLt3tIDrN6yi8vGTOa2iTN57ZZBZKbpayKRaKeiiJRLTUmifV4m91/am2Ubd/CrV+eGXZLUMIWCiHzD8R1zuemUTjxfWMTrs9eEXY7UIIWCiMR0+2ld6dUmh5+9OJtVulo6YSgURCSmBslJPDykN/sPOHc8N5P9Mc5gkvpHoSAilWqXm8mvBvdg6leb+MsHS8IuR2qAQkFEDurC4/I5p1cr7n97ITNWbA67HIkzhYKIHJSZ8b/n9aBF43RumziT7XtKwy5J4kihICKHlJ3RgAeH9KZo805++YpOU63PFAoiUiX92jdl5Kld+PvnRUya9Y37ZUk9oVAQkSq79dTOHNc2h/9+aQ5Fm3eGXY7EgUJBRKosJTmJh4b0wR1unziT0v0Hwi5JqplCQUQOS5umDfn1ed0pXL6ZUe/pNNX6RqEgIoft/D6tOa93Kx7+1yKmL9dsmG+3AAANMElEQVRpqvWJQkFEjsg95/WgZXY6t02cwdbd+8IuR6qJQkFEjkjj9AY8NKQ3a0p284uXvwi7HKkmCgUROWJ92zXl1lO78PLM1Tzw9kJKdmqLoa7T3TNE5Fu5+XudmLOqhIfeXcRfPljCOb1acfmAtvRuk4OZhV2eHKa4bimY2ZlmtsDMFpvZnTGmn2xmn5tZqZldFM9aRCQ+UpKT+OvVBbx2yyAu7Nuaf8xZw/mPfsoPHv6YpycvV7cYdUzc7tFsZsnAQuB0oAiYBgx193lRbdoDjYGfAJPc/YVDLVf3aBap3bbvKeWVmat4evIK5q/ZSmZqMoP75HP5gLZ0b5UddnkJqzbco7k/sNjdlwYFTQQGA+Wh4O7Lgmm6AkaknmiUlsLlA9pxWf+2zFy5hQlTVvD36UU8M2UFvdvkcPmAtvywZysyUpPDLlViiOfuo3xgZdRwUTDusJnZ9WZWaGaFxcXF1VKciMSXmdGnbRP+eHEvpt51Gr/4YTe27d7HT1+YzYDfvsPdk+YyfflmDujmPbVKPLcUYh1hOqJP391HA6Mhsvvo2xQlIjUvu2EDhg/qwDUntmfqV5uYMGUFE6YsZ9yny8hrlMbp3Y7ijG4tOKFTLukNtAURpniGQhHQJmq4NaCuFUUSmJkxoGMuAzrmUrKrB+8vWM9b89YxaeZqnp26kszUZL57dDPO6NaC7x19FNkNG4RdcsKJZyhMA7qYWQdgFTAEuCyOrycidUh2RgMG985ncO989pTu57MlG3lr3jrenreON+asJSXJGNCxKWd0a8Hp3ZrTKicj7JITQtzOPgIws7OBB4FkYKy7/8bM7gEK3X2SmfUDXgKaALuBte7e/WDL1NlHIvXbgQPOrKItvDVvHW/NXcuS4h0A9MhvzKnHNKegXRN6tckhO0NbEYejqmcfxTUU4kGhIJJYlhRv5+0gIGas3ELZV1bnoxrRp00Ovdvm0KdNE7o2b0RKsjppqIxCQUTqnW279zGnqIQZK7cwY8VmPl+xhU079gLQMDWZnq2z6dO2CX3a5NCnbROaZaWFXHHtURuuUxARqVZZ6Q0Y2DmPgZ3zAHB3Vm7axYyVm5mxIhIUYz5cSmlwmmvrJhn0ap1D9/zGHJufTfdW2TTNTA1zFWo9hYKI1FlmRtvchrTNbcjg3pHLoHbv28/c1SXMWLGFz1dsZvaqLbw+Z035PPk5GXRv1Zge+dn0yG9Mj1bZHNU4PaxVqHUUCiJSr6Q3SKZvu6b0bde0fFzJzn3MXV3CF6tL+GLVVr5YXcLb89eVH59olpVGjyAourfK5jsts2jTpCFJSYnXoZ9CQUTqveyGX9/tBJE+muav2cqcokhYzF21lQ8WFlN2gXVGg2S6NG9E1+ZZHN08i64tIn+bN06r172/KhREJCE1SkuhX/um9Gv/7y2KXXv38+XarSxct40Fa7ezcN02PlhYzAvTi8rbNE5P4egWWZGwCP52bZ5Fk4YN6kVYKBRERAIZqcmRs5faNvna+E079rJw3bYgLCJ/X521mglT/t0teOP0FNrnZdIuN5P2uQ2/9jevUWqdCQyFgojIITTNTOX4jrkc3zG3fJy7s27rHhas28aiddtYsWknX23YwayVW3h99mqi+/lrlJZCu9yGtM/NLP/bumkG+TkZtMhOJy2l9vT3pFAQETkCZkaL7HRaZKfz3a7NvjZtb+kBVm3ZxbKNO1i+YQfLNu5k2cYdzFuzlTfnri0/ZbZMXqM08nPSaZWTQaucDFpmp5MfPG+Vk0FuZmqNHfRWKIiIVLPUlCQ65GXSIS8Tjv76tNL9kcBYtXkXq7bsYk3JblZviTxfuG4b7y8oZte+/V9fXnISLXPS+fHpXctPvY0XhYKISA1KSU6iXW7k2EMs7k7Jrn2RwNiym9Ulu8qf52bG/wpthYKISC1iZuQ0TCWnYWooty9V71EiIlJOoSAiIuUUCiIiUk6hICIi5RQKIiJSTqEgIiLlFAoiIlJOoSAiIuXq3D2azawYWH6Es+cBG6qxnLomkdc/kdcdEnv9te4R7dy92cEaQx0MhW/DzAqrcuPq+iqR1z+R1x0Se/217oe37tp9JCIi5RQKIiJSLtFCYXTYBYQskdc/kdcdEnv9te6HIaGOKYiIyMEl2paCiIgchEJBRETKJUwomNmZZrbAzBab2Z1h11OTzGyZmc0xs5lmVhh2PfFmZmPNbL2ZfRE1rqmZvW1mi4K/TcKsMV4qWfe7zWxV8PnPNLOzw6wxXsysjZm9Z2bzzWyumd0WjE+Uz76y9T+szz8hjimYWTKwEDgdKAKmAUPdfV6ohdUQM1sGFLh7QlzAY2YnA9uBJ929RzDuXmCTu/8++FHQxN3/X5h1xkMl6343sN3d/xhmbfFmZi2Blu7+uZllAdOB84BhJMZnX9n6X8JhfP6JsqXQH1js7kvdfS8wERgcck0SJ+7+IbCpwujBwPjg+Xgi/1nqnUrWPSG4+xp3/zx4vg2YD+STOJ99Zet/WBIlFPKBlVHDRRzBm1WHOfCWmU03s+vDLiYkzd19DUT+8wBHhVxPTRtpZrOD3Uv1cvdJNDNrD/QBppCAn32F9YfD+PwTJRQsxrj6v9/s30509+OAs4Cbg10Mkjj+DHQCegNrgPvCLSe+zKwR8HfgdnffGnY9NS3G+h/W558ooVAEtIkabg2sDqmWGufuq4O/64GXiOxOSzTrgn2uZfte14dcT41x93Xuvt/dDwBjqMefv5k1IPKFOMHdXwxGJ8xnH2v9D/fzT5RQmAZ0MbMOZpYKDAEmhVxTjTCzzOCgE2aWCZwBfHHwueqlScDVwfOrgVdCrKVGlX0hBs6nnn7+ZmbA48B8d78/alJCfPaVrf/hfv4JcfYRQHAa1oNAMjDW3X8Tckk1wsw6Etk6AEgBnqnv625mzwKnEOk2eB3wS+Bl4HmgLbACuNjd690B2UrW/RQiuw4cWAb8qGwfe31iZoOAj4A5wIFg9F1E9qsnwmdf2foP5TA+/4QJBRERObRE2X0kIiJVoFAQEZFyCgURESmnUBARkXIKBRERKadQkFrDzD4N/rY3s8uqedl3
xXqteDGz88zsF3Fa9l2HbnXYyzzWzMZV93Kl7tEpqVLrmNkpwE/c/YeHMU+yu+8/yPTt7t6oOuqrYj2fAudWtWdaM0tx99Iqto3LupjZO8Bwd19R3cuWukNbClJrmNn24OnvgZOCvt/vMLNkM/uDmU0LOvX6UdD+lKD/+GeIXLCDmb0cdPw3t6zzPzP7PZARLG9C9GtZxB/M7AuL3HPi0qhlv29mL5jZl2Y2IbhiFDP7vZnNC2r5RnfEZtYV2FMWCGY2zsz+YmYfmdlCM/thMH6Ymf3NzF4F3grG/TRqPX8VY9mx1uUKM5sajHss6CoeM9tuZr8xs1lmNtnMmgfjLw7Wd5aZfRi1+FeJXO0viczd9dCjVjyI9PkOkStwX4safz3w8+B5GlAIdAja7QA6RLVtGvzNIHI5f270smO81oXA20SudG9O5IrXlsGyS4j0k5UEfAYMApoCC/j3VnZOjPW4Brgvangc8M9gOV2I9MWVTqSf/6Koms8gcqN1C9q+Bpxc2fsUPP8OkS/zBsHwo8BVwXMHzgme3xv1Hs4B8ivWD5wIvBr2vwM9wn1oS0HqgjOAq8xsJpEuC3KJfLkCTHX3r6La3mpms4DJRDpB7MLBDQKe9UiHYeuAD4B+Ucsu8khHYjOB9sBWYDfwVzO7ANgZY5ktgeIK45539wPuvghYChwTjH/b/93lwhnBYwbwedDmUPX/B9AXmBa8P/8BdAym7SUSLBC54Ur74PknwDgzG0EkDMusB1od4vWknksJuwCRKjDgFnd/82sjI8cedlQYPg04wd13mtn7RH6RH2rZldkT9Xw/kOLupWbWn8iX7xBgJHBqhfl2AdkVxlU8eFc2vCNqnAG/c/fHDlFzNAPGu/vPYkzb5+5lr7Of4P+7u99gZgOAHwAzzay3u28k8l7tOozXlnpIWwpSG20DsqKG3wRuDLoFxsy6Bj2+VpQNbA4C4Rjg+Khp+8rmr+BD4NLguEUz4GRgamWFBX3VZ7v7G8DtRDoaq2g+0LnCuIvNLMnMOhH5Jb8gxnxvAsOD18DM8s0s1g1hotflXeCisnYWuR9xu8rqD9p0cvcp7v4LYAP/7la+K/W0B1WpOm0pSG00GygNdgONAx4isuvj8+BgbzGxb6n4T+AGM5tN5Et3ctS00cBsM/vc3S+PGv8ScAIwi8iv9/9y97VBqMSSBbxiZulEfqXfEaPNh8B9ZmZRv9QXENk11Ry4wd13B8ety7n7W2b2HeCzYNp24Aq+2f//19bFzH5O5M56ScA+4GZgeSX1A/zBzLoE9b8brDvA94DXDzKfJACdkioSB2b2EJGDtu8E5/+/5u4vhFxWpcwsjUhoDfIqnhor9ZN2H4nEx2+BhmEXcRjaAncqEERbCiIiUk5bCiIiUk6hICIi5RQKIiJSTqEgIiLlFAoiIlLu/wOywYukHk+RRQAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "def two_layer_model(X,Y,layer_dims,learning_rate=0.0075,num_iterations=3000,print_cost=False,isPlot=True):\n",
    "    \"\"\"\n",
    "    实现一个两层的神经网络，【LINEAR->RELU】 -> 【LINEAR->SIGMOID】\n",
    "    参数：\n",
    "        X - 输入的数据，维度为(n_x，例子数)\n",
    "        Y - 标签，向量，0为非猫，1为猫，维度为(1,数量)\n",
    "        layers_dims - 层数的向量，维度为(n_y,n_h,n_y)\n",
    "        learning_rate - 学习率\n",
    "        num_iterations - 迭代的次数\n",
    "        print_cost - 是否打印成本值，每100次打印一次\n",
    "        isPlot - 是否绘制出误差值的图谱\n",
    "    返回:\n",
    "        parameters - 一个包含W1，b1，W2，b2的字典变量\n",
    "    \"\"\"\n",
    "    \n",
    "    np.random.seed(1)\n",
    "    grads = {}\n",
    "    costs = []\n",
    "    (n_x, n_h, n_y) = layers_dims\n",
    "    \n",
    "    \"\"\"\n",
    "    初始化参数\n",
    "    \"\"\"\n",
    "    parameters = initialize_parameters(n_x, n_h, n_y)\n",
    "    \n",
    "    W1 = parameters[\"W1\"]\n",
    "    b1 = parameters[\"b1\"]\n",
    "    W2 = parameters[\"W2\"]\n",
    "    b2 = parameters[\"b2\"]\n",
    "    \n",
    "    \"\"\"\n",
    "    开始迭代\n",
    "    \"\"\"\n",
    "    \n",
    "    for i in range(0,num_iterations):\n",
    "        #前向传播\n",
    "        A1, cache1 = linear_activation_forward(X,W1,b1,\"relu\")\n",
    "        A2, cache2 = linear_activation_forward(A1,W2,b2,\"sigmoid\")\n",
    "        \n",
    "        #计算成本\n",
    "        cost = compute_cost(A2,Y)\n",
    "        \n",
    "        #后向传播\n",
    "        ##初始化后向传播\n",
    "        dA2 = - (np.divide(Y,A2) - np.divide(1-Y, 1-A2))\n",
    "        \n",
    "        ##后向传播，输入：\"dA2，cache2，cache1\"  \n",
    "        ##          输出：\"dA1，dW2，db2;还有dA0（未使用），dW1，db1\"\n",
    "        dA1, dW2, db2 = linear_activation_backward(dA2,cache2,\"sigmoid\")\n",
    "        dA0, dW1, db1 = linear_activation_backward(dA1,cache1,\"relu\")\n",
    "        \n",
    "        ##后向传播完成后的数据保存到grads\n",
    "        grads[\"dW1\"] = dW1\n",
    "        grads[\"db1\"] = db1\n",
    "        grads[\"dW2\"] = dW2\n",
    "        grads[\"db2\"] = db2\n",
    "        \n",
    "        #更新参数\n",
    "        parameters = update_parameters(parameters, grads, learning_rate)\n",
    "        W1 = parameters[\"W1\"]\n",
    "        b1 = parameters[\"b1\"]\n",
    "        W2 = parameters[\"W2\"]\n",
    "        b2 = parameters[\"b2\"]\n",
    "        \n",
    "        #打印成本值，如果print_cost=False则忽略\n",
    "        if i % 100 == 0:\n",
    "            #记录成本\n",
    "            costs.append(cost)\n",
    "            #是否打印成本值\n",
    "            if print_cost:\n",
    "                print(\"第 \",i,\" 次迭代，成本值为： \",np.squeeze(cost))\n",
    "                \n",
    "    #迭代完成，根据条件绘制图\n",
    "    if isPlot:\n",
    "        plt.plot(np.squeeze(costs))\n",
    "        plt.ylabel('cost')\n",
    "        plt.xlabel('iterations (pre tens)')\n",
    "        plt.title(\"learning rate = \" + str(learning_rate))\n",
    "        plt.show()\n",
    "        \n",
    "    #返回parameters\n",
    "    return parameters\n",
    "\n",
    "# 加载数据集，进行处理\n",
    "train_set_x_orig , train_set_y , test_set_x_orig , test_set_y , classes = lr_utils.load_dataset()\n",
    "\n",
    "train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T \n",
    "test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n",
    "\n",
    "train_x = train_x_flatten / 255\n",
    "train_y = train_set_y\n",
    "test_x = test_x_flatten / 255\n",
    "test_y = test_set_y\n",
    "\n",
    "n_x = 12288\n",
    "n_h = 7\n",
    "n_y = 1\n",
    "layers_dims = (n_x,n_h,n_y)\n",
    "parameters = two_layer_model(train_x,train_set_y,layer_dims=(n_x,n_h,n_y),num_iterations=2500,print_cost=True,isPlot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "准确度为： 1.0\n",
      "准确度为： 0.72\n"
     ]
    }
   ],
   "source": [
    "def predict(X,y,parameters):\n",
    "    \"\"\"\n",
    "    该函数用于预测L层神经网络的结果，当然也包含两层\n",
    "    \n",
    "    参数：\n",
    "        X - 测试集\n",
    "        y - 标签\n",
    "        parameters - 训练模型的参数\n",
    "    \n",
    "    返回：\n",
    "        p - 给定数据集X的预测\n",
    "    \"\"\"\n",
    "    \n",
    "    m = X.shape[1]\n",
    "    n = len(parameters) // 2\n",
    "    p = np.zeros((1,m))\n",
    "    \n",
    "    #根据参数前向传播\n",
    "    probas,caches = L_model_forward(X,parameters)\n",
    "    \n",
    "    for i in range(0,probas.shape[1]):\n",
    "        if probas[0,i] > 0.5:\n",
    "            p[0,i] = 1\n",
    "        else:\n",
    "            p[0,i] = 0\n",
    "    \n",
    "    print(\"准确度为： \" + str(float(np.sum((p == y))/m)))\n",
    "    \n",
    "    return p\n",
    "\n",
    "predictions_train = predict(train_x, train_y, parameters) #训练集\n",
    "predictions_test = predict(test_x, test_y, parameters) #测试集"
   ]
  },
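  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The element-wise thresholding loop in `predict` works, but the same 0.5 threshold can be applied in one vectorized comparison (our sketch):\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Vectorized alternative (ours) to the loop in predict: no Python loop needed.\n",
    "probas_demo, _ = L_model_forward(train_x, parameters)\n",
    "p_vec = (probas_demo > 0.5).astype(float)\n",
    "print(\"Accuracy:\", float(np.sum(p_vec == train_y) / train_y.shape[1]))"
   ]
  },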
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def L_layer_model(X,Y,layers_dims,learning_rate=0.0075,num_iterations=3000,print_cost=False,isPlot=True):\n",
    "    \"\"\"\n",
    "    实现一个L层神经网络：[LINEAR-> RELU] *（L-1） - > LINEAR-> SIGMOID。\n",
    "    \n",
    "    参数：\n",
    "\t    X - 输入的数据，维度为(n_x，例子数)\n",
    "        Y - 标签，向量，0为非猫，1为猫，维度为(1,数量)\n",
    "        layers_dims - 层数的向量，维度为(n_y,n_h,···,n_h,n_y)\n",
    "        learning_rate - 学习率\n",
    "        num_iterations - 迭代的次数\n",
    "        print_cost - 是否打印成本值，每100次打印一次\n",
    "        isPlot - 是否绘制出误差值的图谱\n",
    "    \n",
    "    返回：\n",
    "     parameters - 模型学习的参数。 然后他们可以用来预测。\n",
    "    \"\"\"\n",
    "    \n",
    "    np.random.seed(1)\n",
    "    costs = []\n",
    "    \n",
    "    parameters = initialize_parameters_deep(layers_dims)\n",
    "    \n",
    "    for i in range(0,num_iterations):\n",
    "        AL, caches = L_model_forward(X,parameters)\n",
    "        cost = compute_cost(AL,Y)\n",
    "        grads = L_model_backward(AL,Y,caches)\n",
    "        parameters = update_parameters(parameters,grads,learning_rate)\n",
    "        \n",
    "        #打印成本值，如果print_cost = False则忽略\n",
    "        if i % 100 == 0:\n",
    "            #记录成本\n",
    "            costs.append(cost)\n",
    "            #是否打印成本值\n",
    "            if print_cost:\n",
    "                print(\"第 \",i,\" 次迭代，成本值为： \",np.squeeze(cost))\n",
    "    \n",
    "    #迭代完成，根据条件绘制图\n",
    "    if isPlot:\n",
    "        plt.plot(np.squeeze(costs))\n",
    "        plt.ylabel('cost')\n",
    "        plt.xlabel('iterations (pre tens)')\n",
    "        plt.title(\"learning rate = \" + str(learning_rate))\n",
    "        plt.show()\n",
    "        \n",
    "    #返回parameters\n",
    "    return parameters\n",
    "\n",
    "# 加载数据集，进行处理\n",
    "train_set_x_orig , train_set_y , test_set_x_orig , test_set_y , classes = lr_utils.load_dataset()\n",
    "\n",
    "train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T \n",
    "test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n",
    "\n",
    "train_x = train_x_flatten / 255\n",
    "train_y = train_set_y\n",
    "test_x = test_x_flatten / 255\n",
    "test_y = test_set_y\n",
    "\n",
    "# 正式训练\n",
    "layers_dims = [12288, 20, 7, 5, 1] #  5-layer model\n",
    "parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True,isPlot=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pred_train = predict(train_x, train_y, parameters) #训练集\n",
    "pred_test = predict(test_x, test_y, parameters) #测试集"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
