{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "8dafc7fc",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
     "<center> <b><font size=25>Lecture 7 Deep Learning (Part Two)</font></b></center>\n",
     "<center>Fupeng Li@CCNU, Email:fupengli29@mails.ccnu.edu.cn</center>\n",
     "<center>2023.11.29</center>"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "aefba6e7",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
     "<b><font size=65>Outline</font></b>\n",
    "1. Forward Propagation\n",
    "2. Backpropagation\n",
    "3. Optimizer\n",
    "4. Example: Classification of quark and gluon jet\n",
    "5. Summary"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fe92d891",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "<b><font size=45>0. Begin</font></b>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "603c5bd8",
   "metadata": {
    "slideshow": {
     "slide_type": "-"
    }
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "\n",
       "        <iframe\n",
       "            width=\"1400\"\n",
       "            height=\"800\"\n",
       "            src=\"http://playground.tensorflow.org/\"\n",
       "            frameborder=\"0\"\n",
       "            allowfullscreen\n",
       "        ></iframe>\n",
       "        "
      ],
      "text/plain": [
       "<IPython.lib.display.IFrame at 0x21bb423f760>"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Embed the interactive TensorFlow Playground demo as an inline iframe\n",
     "from IPython.display import IFrame\n",
     "IFrame('http://playground.tensorflow.org/',width=1400, height=800)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a7303fca",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "<b><font size=45>1. Forward Propagation</font></b>\n",
    " <div align=center><img src=\"1.jpg\"></div>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "fa8bad78",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入数据： [[0.3878969  0.64702248 0.85309343]]\n",
      "输出结果： [[0.92233385]]\n"
     ]
    }
   ],
   "source": [
     "import numpy as np\n",
     "\n",
     "def sigmoid(x):\n",
     "    '''Logistic sigmoid activation: 1 / (1 + e^-x).'''\n",
     "    return 1 / (1 + np.exp(-x))\n",
     "\n",
     "# Define the network architecture: 3 inputs -> 4 hidden units -> 1 output\n",
     "input_size = 3\n",
     "hidden_size = 4\n",
     "output_size = 1\n",
     "\n",
     "# Initialize weights and biases (uniform random in [0, 1))\n",
     "weights_input_hidden = np.random.rand(input_size, hidden_size)\n",
     "bias_input_hidden = np.random.rand(1, hidden_size)\n",
     "\n",
     "weights_hidden_output = np.random.rand(hidden_size, output_size)\n",
     "bias_hidden_output = np.random.rand(1, output_size)\n",
     "\n",
     "# Input data: one random sample of shape (1, input_size)\n",
     "input_data = np.random.rand(1, input_size)\n",
     "\n",
     "# Forward propagation\n",
     "# Input layer -> hidden layer\n",
     "hidden_input = np.dot(input_data, weights_input_hidden) + bias_input_hidden\n",
     "hidden_output = sigmoid(hidden_input)\n",
     "\n",
     "# Hidden layer -> output layer\n",
     "output_input = np.dot(hidden_output, weights_hidden_output) + bias_hidden_output\n",
     "output = sigmoid(output_input)\n",
     "\n",
     "print(\"输入数据：\", input_data)\n",
     "print(\"输出结果：\", output)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "195f5a89",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "<b><font size=45>2. Backpropagation</font></b>\n",
    " <div align=center><img src=\"2.jpg\"></div>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "9ff2bf01",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
     "import numpy as np\n",
     "\n",
     "\n",
     "def sigmoid(x):\n",
     "    '''Logistic sigmoid activation: 1 / (1 + e^-x).'''\n",
     "    return 1 / (1 + np.exp(-x))\n",
     "\n",
     "def sigmoid_derivative(x):\n",
     "    '''Derivative of the sigmoid expressed in terms of the sigmoid OUTPUT:\n",
     "    if x = sigmoid(z), then ds/dz = x * (1 - x).'''\n",
     "    return x * (1 - x)\n",
     "\n",
     "\n",
     "# Define the network architecture and training hyper-parameters\n",
     "input_size = 3\n",
     "hidden_size = 4\n",
     "output_size = 1\n",
     "learning_rate = 0.1\n",
     "epochs = 1000\n",
     "\n",
     "# Initialize weights and biases (uniform random in [0, 1))\n",
     "weights_input_hidden = np.random.rand(input_size, hidden_size)\n",
     "bias_input_hidden = np.random.rand(1, hidden_size)\n",
     "\n",
     "weights_hidden_output = np.random.rand(hidden_size, output_size)\n",
     "bias_hidden_output = np.random.rand(1, output_size)\n",
     "\n",
     "# Input data and regression target: sum of squared inputs\n",
     "# NOTE(review): the sigmoid output is bounded in (0, 1), but this target can\n",
     "# exceed 1 for some random inputs — the fit is then impossible by construction\n",
     "input_data = np.random.rand(1, input_size)\n",
     "output_data = np.sum(input_data**2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "1b61193f",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch=0, error=0.000000\n",
      "epoch=250, error=0.000000\n",
      "epoch=500, error=0.000000\n",
      "epoch=750, error=0.000000\n",
      "Output from NN:3.924721e-01\n",
      "The true output:3.924721e-01\n"
     ]
    }
   ],
   "source": [
     "# Train the tiny network by plain gradient descent on the squared error\n",
     "for epoch in range(epochs):\n",
     "    # Forward propagation\n",
     "    hidden_input = np.dot(input_data, weights_input_hidden) + bias_input_hidden\n",
     "    hidden_output = sigmoid(hidden_input)\n",
     "    output_input = np.dot(hidden_output, weights_hidden_output) + bias_hidden_output\n",
     "    output = sigmoid(output_input)\n",
     "    # Backpropagation\n",
     "    # Error at the output layer\n",
     "    # loss function: (y - y_true)^2\n",
     "    output_error =  output - output_data\n",
     "    output_delta = 2 * output_error * sigmoid_derivative(output)\n",
     "    # Error propagated back to the hidden layer\n",
     "    hidden_error = output_delta.dot(weights_hidden_output.T)\n",
     "    hidden_delta = hidden_error * sigmoid_derivative(hidden_output)\n",
     "    # Gradient-descent update of weights and biases\n",
     "    weights_hidden_output -= hidden_output.T.dot(output_delta) * learning_rate\n",
     "    bias_hidden_output -= np.sum(output_delta, axis=0, keepdims=True) * learning_rate\n",
     "\n",
     "    weights_input_hidden -= input_data.T.dot(hidden_delta) * learning_rate\n",
     "    bias_input_hidden -= np.sum(hidden_delta, axis=0, keepdims=True) * learning_rate\n",
     "    # Forward pass with the updated parameters (for monitoring only)\n",
     "    hidden_output_updated = sigmoid(np.dot(input_data, weights_input_hidden) + bias_input_hidden)\n",
     "    output_updated = sigmoid(np.dot(hidden_output_updated, weights_hidden_output) + bias_hidden_output)\n",
     "    if epoch%250 == 0:\n",
     "        print(\"epoch=%s, error=%.6f\"%(epoch,((output_updated - output_data).item())**2))\n",
     "print(\"Output from NN:%.6e\"%output_updated.item())\n",
     "print(\"The true output:%.6e\"%output_data)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18e736fa",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "<b><font size=45>3. Optimizer</font></b>\n",
    " <div align=center><img src=\"3.webp\"></div>"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c0bdd9c2",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "# 1. $L = \\theta^2$\n",
    "# 2. $L(x_1, x_2) =0.1x_1^2 + 2x_2^2$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "d40bb4f5",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
     "from matplotlib import ticker, cm\n",
     "import seaborn as sns\n",
     "import ipywidgets as widgets\n",
     "from ipywidgets import interact, interact_manual\n",
     "import math\n",
     "# NOTE: the 'science' and 'notebook' styles require the SciencePlots package\n",
     "plt.style.use([\"science\", \"notebook\", \"no-latex\"])\n",
     "\n",
     "to_html = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "79a2ae97",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
     "def loss_function(theta):\n",
     "    '''Example: a simple quadratic loss, L = theta^2.'''\n",
     "    return theta**2\n",
     "\n",
     "def negative_gradient(theta):\n",
     "    '''Negative gradient of the loss w.r.t. theta: -dL/dtheta = -2*theta.'''\n",
     "    return - 2 * theta"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "aee69df0",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6961f5ea8e794e4292d7c9811226e71e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "interactive(children=(FloatSlider(value=2.0, description='lr', max=4.0, min=0.1), Output()), _dom_classes=('wi…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "@interact\n",
     "\n",
     "def gradient_descent(lr=(0.1, 4, 0.1)):\n",
     "    '''Visualize plain gradient descent on L = theta^2 for learning rate lr.\n",
     "    Diverges for lr > 1 (step overshoots the minimum).'''\n",
     "    nsteps = 20\n",
     "    theta = np.linspace(-1.5, 1.5, 100)\n",
     "    loss = loss_function(theta)\n",
     "    plt.plot(theta, loss)\n",
     "    theta_i = 1\n",
     "    history = [theta_i] \n",
     "    # Record the parameter value after every update step\n",
     "    for n in range(nsteps):\n",
     "        theta_i = theta_i + lr * negative_gradient(theta_i)\n",
     "        history.append(theta_i)\n",
     "    \n",
     "    history = np.array(history)\n",
     "    fi = loss_function(history)\n",
     "    plt.plot(history, fi, 'ro-')\n",
     "    plt.xlim(-1.5, 1.5)\n",
     "    plt.ylim(-0.2, 2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "9295acae",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
     "# Define the 2-D loss function, its gradient, and the generic training loop\n",
     "def f_2d(x1, x2):\n",
     "    '''Objective function to minimize: 0.1*x1^2 + 2*x2^2 (elongated bowl).'''\n",
     "    return 0.1 * x1 ** 2 + 2 * x2 ** 2\n",
     "\n",
     "def f_grad(x1, x2):\n",
     "    '''Analytic gradient (df/dx1, df/dx2) of f_2d.'''\n",
     "    dfdx1 = 0.2 * x1\n",
     "    dfdx2 = 4 * x2\n",
     "    return dfdx1, dfdx2\n",
     "\n",
     "def train_2d(trainer, lr):\n",
     "    \"\"\"Run 50 steps of `trainer` from the fixed start (-5, -2);\n",
     "    returns the list of visited (x1, x2) positions.\"\"\"\n",
     "    x1, x2 = -5, -2\n",
     "    s_x1, s_x2 = 0, 0\n",
     "    res = [(x1, x2)]\n",
     "    for i in range(50):\n",
     "        # trainer can be SGD, Momentum, RMSProp, Adam, etc.\n",
     "        x1, x2, s_x1, s_x2, lr = trainer(x1, x2, s_x1, s_x2, lr)\n",
     "        res.append((x1, x2))\n",
     "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "d1a699d6",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
     "def plot_2d(res, figsize=(10, 6), title=None):\n",
     "    '''Plot an optimization trajectory `res` (list of (x1, x2) points)\n",
     "    on top of a filled contour map of f_2d.'''\n",
     "    x1_, x2_ = zip(*res)\n",
     "    fig = plt.figure(figsize=figsize)\n",
     "    plt.plot([0], [0], 'r*', ms=15)\n",
     "    plt.text(0.0, 0.25, 'minimum', color='w')\n",
     "    # Mark the starting position\n",
     "    plt.plot(x1_[0], x2_[0], 'ro', ms=10)\n",
     "    plt.text(x1_[0]+0.1, x2_[0]+0.2, 'start', color='w')\n",
     "    plt.plot(x1_, x2_, '-o', color='#ff7f0e')\n",
     "    # Mark the final position\n",
     "    plt.plot(x1_[-1], x2_[-1], 'wo')\n",
     "    plt.text(x1_[-1], x2_[-1]-0.25, 'end', color='w')\n",
     "    # Background for the animation: contour map of 0.1*x1**2 + 2*x2**2\n",
     "    x1 = np.linspace(-5.5, 3, 50)\n",
     "    x2 = np.linspace(min(-3.0, min(x2_) - 1), max(3.0, max(x2_) + 1), 100)\n",
     "    x1, x2 = np.meshgrid(x1, x2)\n",
     "    plt.contourf(x1, x2, f_2d(x1, x2), cmap=cm.gnuplot)\n",
     "    plt.xlabel('x1')\n",
     "    plt.ylabel('x2')\n",
     "    plt.title(title)\n",
     "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "id": "4e3cf28e",
   "metadata": {
    "collapsed": true,
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d30fb910243f4a09b2eb49086f3cc4a5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "interactive(children=(FloatSlider(value=0.29000000000000004, description='lr', max=0.6, min=0.01, step=0.02), …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "def sgd(x1, x2, s1, s2, lr):\n",
     "    '''Plain (stochastic) gradient descent step.\n",
     "    (x1, x2): current position\n",
     "    (s1, s2): optimizer state slots — unused by plain SGD, kept only so all\n",
     "    trainers share the same interface expected by train_2d'''\n",
     "    dfdx1, dfdx2 = f_grad(x1, x2)\n",
     "    return (x1 - lr * dfdx1, x2 - lr * dfdx2, 0, 0, lr)\n",
     "\n",
     "@interact\n",
     "def visualize_gradient_descent(lr=(0.01, 0.6, 0.02)):\n",
     "    '''Visualize the SGD learning process for learning rate lr.'''\n",
     "    res = train_2d(sgd, lr)\n",
     "    plot_2d(res, title='SGD')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1536a98d",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "# Momentum\n",
    "\\begin{aligned}\n",
    "v_t &= \\gamma v_{t-1} + \\eta \\nabla_{\\theta} J(\\theta_{t-1}) \\\\\n",
    "\\theta_t &= \\theta_{t-1} - v_t\n",
    "\\end{aligned}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "id": "9332aa36",
   "metadata": {
    "collapsed": true,
    "slideshow": {
     "slide_type": "-"
    }
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "96db5e68eeff4004a0c40b65715d816c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "interactive(children=(FloatSlider(value=0.29000000000000004, description='lr', max=0.6, min=0.01, step=0.02), …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "def momentum(x1, x2, v1, v2, lr, gamma):\n",
     "    '''One SGD-with-momentum step: v <- gamma*v + lr*grad, x <- x - v.'''\n",
     "    dfdx1, dfdx2 = f_grad(x1, x2)\n",
     "    v1 = gamma * v1 + lr * dfdx1\n",
     "    v2 = gamma * v2 + lr * dfdx2\n",
     "    x1 = x1 - v1\n",
     "    x2 = x2 - v2\n",
     "    return (x1, x2, v1, v2, lr)\n",
     "\n",
     "@interact\n",
     "def visualize_sgd_momentum(lr=(0.01, 0.6, 0.02), gamma=(0.4, 0.9, 0.1)):\n",
     "    '''lr: learning rate\n",
     "    gamma: parameter for momentum sgd'''  \n",
     "    trainer = lambda x1, x2, v1, v2, lr: momentum(x1, x2, v1, v2, lr, gamma)\n",
     "    res = train_2d(trainer, lr)\n",
     "    plot_2d(res, title='momentum')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "07ef5aed",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "# Adagrad\n",
    "\\begin{aligned}\n",
    "g_t &=  \\nabla_{\\theta} L(\\theta)\\\\\n",
    "G &= \\sum_{t} g_t^2\\\\\n",
    "\\theta &= \\theta - \\frac{\\eta}{\\sqrt{G + \\epsilon}} \\cdot g_t\n",
    "\\end{aligned}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "id": "a2944708",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "99e4c7e1ce2c424d9436181d0ad506ad",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "interactive(children=(FloatSlider(value=2.0, description='lr', max=4.0, step=0.01), Output()), _dom_classes=('…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "@interact\n",
     "def visualize_adagrad(lr=(0, 4, 0.01)):\n",
     "    '''Visualize the Adagrad learning process.\n",
     "    lr: learning rate'''\n",
     "    def adagrad_2d(x1, x2, s1, s2, lr):\n",
     "        # Accumulate squared gradients; the effective step size for each\n",
     "        # coordinate decays as 1/sqrt(sum of g^2)\n",
     "        g1, g2 = f_grad(x1, x2)\n",
     "        eps = 1e-6\n",
     "        s1 += g1 ** 2\n",
     "        s2 += g2 ** 2\n",
     "        x1 -= lr / math.sqrt(s1 + eps) * g1\n",
     "        x2 -= lr / math.sqrt(s2 + eps) * g2\n",
     "        return x1, x2, s1, s2, lr\n",
     "    \n",
     "    res = train_2d(adagrad_2d, lr)\n",
     "    plot_2d(res, title='adagrad')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "99baad93",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "# RMSProp\n",
    "\\begin{aligned}\n",
    "g&=  \\nabla_{\\theta} L(\\theta)\\\\\n",
    "E\\left[g^2\\right] &= \\gamma E\\left[g^2\\right] + (1-\\gamma) g^2\\\\\n",
    "\\theta &= \\theta - \\frac{\\eta}{\\sqrt{E\\left[g^2\\right] + \\epsilon}} \\cdot g\n",
    "\\end{aligned}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "id": "84093eba",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "67547600622d45f89887c1ba4be443c2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "interactive(children=(FloatSlider(value=0.0, description='lr', max=1.0, step=0.001), FloatSlider(value=0.495, …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "@interact\n",
     "def visualize_rmsprop(lr=(0, 1, 0.001), gamma=(0, 0.99, 0.001)):\n",
     "    '''lr: learning rate, \n",
     "       gamma: momentum'''  \n",
     "    def rmsprop_2d(x1, x2, s1, s2, lr):\n",
     "        # Exponential moving average of squared gradients (vs Adagrad's\n",
     "        # unbounded sum), so the effective step size does not decay to zero\n",
     "        eps = 1e-6\n",
     "        g1, g2 = f_grad(x1, x2)\n",
     "        s1 = gamma * s1 + (1 - gamma) * g1 ** 2\n",
     "        s2 = gamma * s2 + (1 - gamma) * g2 ** 2\n",
     "        x1 -= lr / math.sqrt(s1 + eps) * g1\n",
     "        x2 -= lr / math.sqrt(s2 + eps) * g2\n",
     "        return x1, x2, s1, s2, lr\n",
     "\n",
     "    res = train_2d(rmsprop_2d, lr)\n",
     "    plot_2d(res, title='rmsprop')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ed03859c",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "# Adam\n",
    "\\begin{align}\n",
    "\\theta = \\theta - \\frac{\\eta}{\\sqrt{\\hat{n}} + \\epsilon} \\hat{m}\n",
    "\\end{align}\n",
    "* where m is the sliding average of the gradient and n is the sliding average of the gradient squared:\n",
    "\\begin{align}\n",
    "g & = \\nabla_{\\theta} L(\\theta), \\\\\n",
    "m &= \\beta_1 m + (1 - \\beta_1) g, \\\\\n",
    "n &= \\beta_2 n + (1 - \\beta_2) g^2 \\\\\n",
    "\\hat{m} &= \\frac{m}{(1 - \\beta_1^t)}, \\\\\n",
    "\\hat{n} &= \\frac{n}{(1 - \\beta_2^t)}\n",
    "\\end{align}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "id": "fda43b45",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
     "def adam_core(lr, m, n, g, t, beta1, beta2):\n",
     "    '''Core of the Adam update for a single scalar parameter.\n",
     "    m, n: moving averages of g and g^2;  t: 1-based time step used for\n",
     "    bias correction.  Returns the updated (m, n) and the step dx.'''\n",
     "    eps = 1.0E-6\n",
     "    m = beta1 * m + (1 - beta1) * g\n",
     "    n = beta2 * n + (1 - beta2) * g*g\n",
     "    m_hat = m / (1 - beta1**t)\n",
     "    n_hat = n / (1 - beta2**t)\n",
     "    dx = lr * m_hat / (math.sqrt(n_hat) + eps)\n",
     "    return m, n, dx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "id": "0c135c8b",
   "metadata": {
    "collapsed": true,
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "150ccff6f4fb489eb134c302fb85502f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "interactive(children=(FloatSlider(value=0.0, description='lr', max=1.0, step=0.01), FloatSlider(value=0.49, de…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "@interact\n",
    "def visualize_adam(  lr=(0, 1, 0.01), beta1=(0, 0.99, 0.01),beta2=(0, 0.99, 0.01),):\n",
    "    ''' 可视化 Adam 优化算法的学习过程\n",
    "    lr: learning rate\n",
    "    beta1: parameter for E(g)\n",
    "    beta2: parameter for E(g^2)\n",
    "    '''    \n",
    "    def adam_2d(x1, x2, m1, n1, m2, n2, lr, t):\n",
    "        '''m1, m2: E(g1), E(g2)\n",
    "           n1, n2: E(g1^2), E(g2^2) where E() is expectation\n",
    "           lr: learning rate\n",
    "           t: time step'''\n",
    "        eps = 1e-6\n",
    "        g1, g2 = f_grad(x1, x2)\n",
    "        m1, n1, dx1 = adam_core(lr, m1, n1, g1, t, beta1, beta2)\n",
    "        m2, n2, dx2 = adam_core(lr, m2, n2, g2, t, beta1, beta2)       \n",
    "        x1 -= dx1\n",
    "        x2 -= dx2\n",
    "        return x1, x2, m1, n1, m2, n2, lr\n",
    "    \n",
    "    def train_adam(trainer, lr):\n",
    "        \"\"\"Train a 2d object function with a customized trainer\"\"\"\n",
    "        x1, x2 = -5, -2\n",
    "        m1, n1, m2, n2 = 0, 0, 0, 0\n",
    "        res = [(x1, x2)]\n",
    "        for i in range(30):\n",
    "            x1, x2, m1, n1, m2, n2, lr = trainer(x1, x2, m1, n1, m2, n2, lr, i+1)\n",
    "            res.append((x1, x2))\n",
    "        return res\n",
    "    \n",
    "    res = train_adam(adam_2d, lr)\n",
    "    plot_2d(res, title='adam')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "59ba91ab",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "# <b><font size=45>4. Example: Classification of quark and gluon jet</font></b>\n",
     "* Determine whether a jet originates from a quark or a gluon based on the final-state hadrons it fragments into"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "aca7f05a",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'np' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-19-c81d7613ced8>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"QG_jets.npz\"\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mdat\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      2\u001b[0m     \u001b[0mx_qg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdat\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m\"X\"\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      3\u001b[0m     \u001b[0my_qg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdat\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m\"y\"\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0minteract\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'np' is not defined"
     ]
    }
   ],
   "source": [
     "# NOTE(review): the recorded output shows a NameError for `np` — this cell\n",
     "# requires the earlier import cells (np, plt, interact) to have been run first\n",
     "with np.load(\"QG_jets.npz\") as dat:\n",
     "    x_qg = dat[\"X\"]\n",
     "    y_qg = dat[\"y\"]\n",
     "    \n",
     "@interact\n",
     "def visualize(eid=range(0, 100)):\n",
     "    '''Visualize sample `eid` as a 2D histogram in (rapidity, phi) to explore the data.'''\n",
     "    dat = x_qg[eid]\n",
     "    pt = dat[:, 0]\n",
     "    rapidity = dat[:, 1]\n",
     "    phi = dat[:, 2]\n",
     "    plt.hist2d(rapidity, phi, \n",
     "               bins=28, cmin=0, cmax=5, cmap=plt.cm.jet)\n",
     "    # For visualization, pixels with count > cmax are rendered as nan"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c01817f",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
    "xmin, xmax = -1.7, 1.7\n",
    "ymin, ymax = 0,  2*np.pi\n",
    "# 将所有图像都对齐到 |y| < 1.7, phi in [0, 2pi] 范围\n",
    "ranges = [[xmin, xmax], [ymin, ymax]]\n",
    "\n",
    "def to_img(eid=0, bins=28):\n",
    "    dat = x_qg[eid]\n",
    "    rapidity = dat[:, 1]\n",
    "    phi = dat[:, 2]\n",
    "    # 将 （eta, phi) 分布转化为 2D 图像\n",
    "    img, _, _ = np.histogram2d(rapidity, phi, \n",
    "                   bins = bins,\n",
    "                   range=ranges,\n",
    "                   normed=True)\n",
    "    \n",
    "    return img\n",
    "\n",
    "num_events = len(x_qg)\n",
    "\n",
    "x_qg_imgs = [to_img(i) for i in tqdm(range(num_events))]\n",
    "# x_qg_imgs 中每张图的最大值远大于 1，\n",
    "# 使用所有事例中, 2d image 中像素最大的值进行缩放\n",
    "x_qg_imgs = np.array(x_qg_imgs)\n",
    "x_qg_imgs = x_qg_imgs / x_qg_imgs.max()\n",
    "# 为了使用 mnist 所示卷积神经网络，将数据增加一个维度\n",
    "x_qg_imgs = x_qg_imgs.reshape(100000, 28, 28, -1)\n",
    "y_qg_onehot = keras.utils.to_categorical(y_qg, 2)\n",
    "from sklearn.model_selection import train_test_split\n",
    "x_train, x_test, y_train, y_test = train_test_split(x_qg_imgs, y_qg_onehot, test_size=0.3)\n",
    "num_classes = 2\n",
    "input_shape = (28,28,1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "33d17fab",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
    "model = keras.Sequential(\n",
    "    [\n",
    "        keras.Input(shape=input_shape),\n",
    "        layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\"),\n",
    "        layers.MaxPooling2D(pool_size=(2, 2)),\n",
    "        layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"),\n",
    "        layers.MaxPooling2D(pool_size=(2, 2)),\n",
    "        layers.Flatten(),\n",
    "        layers.Dropout(0.5),\n",
    "        layers.Dense(num_classes, activation=\"softmax\"),\n",
    "    ]\n",
    ")\n",
    "\n",
    "model.summary()\n",
    "batch_size = 128\n",
    "epochs = 30\n",
    "model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
    "history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d822d1ee",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
     "# Plot training/validation accuracy and loss from the Keras History object\n",
     "acc = history.history[\"accuracy\"]\n",
     "val_acc = history.history[\"val_accuracy\"]\n",
     "loss = history.history[\"loss\"]\n",
     "val_loss = history.history[\"val_loss\"]\n",
     "epochs = range(len(acc))  # NOTE: shadows the `epochs` hyper-parameter defined earlier\n",
     "\n",
     "plt.plot(epochs,acc, \"b\", label=\"Training accuracy\")\n",
     "plt.plot(epochs, val_acc, \"r\", label=\"validation accuracy\")\n",
     "plt.title(\"Training and validation accuracy\")\n",
     "plt.legend(loc=\"lower right\")\n",
     "plt.figure()\n",
     "\n",
     "plt.plot(epochs, loss, \"r\", label=\"Training loss\")\n",
     "plt.plot(epochs, val_loss, \"b\", label=\"validation loss\")\n",
     "plt.title(\"Training and validation loss\")\n",
     "plt.legend()\n",
     "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "47bd711a",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
     "#  <b><font size=45>5. Summary</font></b>\n",
    "1. Forward and back propagation form the basis of deep neural network training\n",
    "2. Optimizer is a very important component of deep learning that minimizes the loss function by way of tuning the model parameters.\n",
    "3. Listening to a hundred reports is not as good as practicing one.\n",
    " <div align=center><img src=\"4.jpg\"></div>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3e59f027",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "celltoolbar": "幻灯片",
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
