{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "说明：斜体字母表示待求解参数，直体字母表示样本；小写表示标量，大写表示向量或矩阵；下标表示形状(样本量用*表示)。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\n",
    "\n",
    "import torch\n",
    "import numpy as np\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "ORI_MODELS_DIR = 'D:/Genlovy_Hoo/HooProjects/HooLLM/ori_models'\n",
    "if not os.path.exists(ORI_MODELS_DIR):\n",
    "    ORI_MODELS_DIR = 'C:/GenlovyHoo/HooProjects/HooLLM/ori_models'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 一元线性回归"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 公式"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "${\\rm y} = k{\\rm x} + b$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 样本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x: tensor([1.0565, 1.8786, 3.4898, 3.8746, 4.8597])\n",
      "y: tensor([ 3.1712,  4.8901,  7.0365,  8.9364, 10.9687])\n"
     ]
    }
   ],
   "source": [
    "# y = 2 * x + 1\n",
    "\n",
    "x = torch.arange(1, 6) + torch.normal(0, 0.2, (5,))  # 均值，标准差，shape\n",
    "y = torch.arange(3, 12, 2) + torch.normal(0, 0.1, (5,))\n",
    "print('x:', x)\n",
    "print('y:', y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* pytorch前向"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " y: tensor([ 3.1712,  4.8901,  7.0365,  8.9364, 10.9687])\n",
      "y_: tensor([ 3.1130,  4.7572,  7.9796,  8.7492, 10.7193], grad_fn=<ViewBackward0>)\n"
     ]
    }
   ],
   "source": [
    "lr_1d = torch.nn.Linear(in_features=1, out_features=1, bias=True)\n",
    "lr_1d.weight = torch.nn.Parameter(torch.tensor([[2.0]]))  # 手动设置权重\n",
    "lr_1d.bias = torch.nn.Parameter(torch.tensor([1.0]))\n",
    "y_ = lr_1d(x.view(-1, 1))\n",
    "print(' y:', y)\n",
    "print('y_:', y_.view(-1,))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 多元线性回归"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 公式"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "${\\rm Y_{*, d2}} = {\\rm X_{*, d1}}W_{d1, d2} + B_{*, d2}$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 样本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "X: \n",
      " tensor([[1.2312, 0.9599, 0.9377, 0.9283, 1.0802, 1.1014],\n",
      "        [0.8240, 0.6402, 0.9632, 0.8110, 0.8043, 1.1230],\n",
      "        [1.2973, 0.9450, 0.9618, 0.9103, 1.1573, 0.9420],\n",
      "        [1.1934, 1.0167, 1.1079, 0.7155, 0.7024, 0.8475],\n",
      "        [0.7658, 1.1172, 1.1671, 0.8411, 1.1909, 0.9459],\n",
      "        [1.2041, 0.8839, 0.6596, 0.8974, 1.1508, 0.8218],\n",
      "        [1.0694, 1.1358, 0.8477, 1.2215, 1.2079, 0.6496],\n",
      "        [1.2250, 0.8829, 0.8497, 1.0824, 0.9391, 1.0872],\n",
      "        [1.0099, 0.8165, 1.0211, 0.5044, 0.8934, 0.7864],\n",
      "        [1.1499, 0.8751, 0.8021, 1.2684, 1.0757, 1.0008]])\n",
      "Y: \n",
      " tensor([[12.9641, 12.9319, 13.0474],\n",
      "        [13.1548, 13.1888, 12.9256],\n",
      "        [13.0911, 13.0255, 13.0596],\n",
      "        [12.8950, 13.0730, 12.9729],\n",
      "        [12.9626, 12.9285, 12.8221],\n",
      "        [13.0718, 13.1035, 13.0633],\n",
      "        [12.8981, 13.1437, 12.9637],\n",
      "        [12.9561, 12.8991, 12.9700],\n",
      "        [13.0403, 13.0393, 13.2668],\n",
      "        [13.0702, 13.1514, 12.8517]])\n"
     ]
    }
   ],
   "source": [
    "d1, d2 = 6, 3\n",
    "n = 10\n",
    "X = torch.normal(1.0, 0.2, (n, d1))\n",
    "print('X: \\n', X)\n",
    "Y = torch.normal(13.0, 0.1, (n, d2))\n",
    "print('Y: \\n', Y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* pytorch前向"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Y: \n",
      " tensor([[12.9641, 12.9319, 13.0474],\n",
      "        [13.1548, 13.1888, 12.9256],\n",
      "        [13.0911, 13.0255, 13.0596],\n",
      "        [12.8950, 13.0730, 12.9729],\n",
      "        [12.9626, 12.9285, 12.8221],\n",
      "        [13.0718, 13.1035, 13.0633],\n",
      "        [12.8981, 13.1437, 12.9637],\n",
      "        [12.9561, 12.8991, 12.9700],\n",
      "        [13.0403, 13.0393, 13.2668],\n",
      "        [13.0702, 13.1514, 12.8517]])\n",
      "Y_: \n",
      " tensor([[13.4774, 13.4774, 13.4774],\n",
      "        [11.3313, 11.3313, 11.3313],\n",
      "        [13.4273, 13.4273, 13.4273],\n",
      "        [12.1668, 12.1668, 12.1668],\n",
      "        [13.0561, 13.0561, 13.0561],\n",
      "        [12.2352, 12.2352, 12.2352],\n",
      "        [13.2639, 13.2639, 13.2639],\n",
      "        [13.1325, 13.1325, 13.1325],\n",
      "        [11.0637, 11.0637, 11.0637],\n",
      "        [13.3442, 13.3442, 13.3442]], grad_fn=<AddmmBackward0>)\n"
     ]
    }
   ],
   "source": [
    "lr = torch.nn.Linear(in_features=d1, out_features=d2, bias=True)\n",
    "lr.weight = torch.nn.Parameter(2.0 * torch.ones(d2, d1))  # torchLinear权重形状为(dim_out, dim_in)\n",
    "lr.bias = torch.nn.Parameter(torch.ones(d2))\n",
    "Y_ = lr(X)\n",
    "print('Y: \\n', Y)\n",
    "print('Y_: \\n', Y_)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Softmax多分类"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 公式"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在多元线性回归的基础上增加softmax函数：\n",
    "\n",
     "${\\rm P_{*, d2}} = {\\rm softmax}({\\rm Y_{*, d2}}) = \\exp({\\rm Y_{*, d2}}) ~/~ \\sum_{j=1}^{d2}\\exp({\\rm Y_{*, j}})$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* pytorch前向"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "P: \n",
      " tensor([[0.3273, 0.3169, 0.3557],\n",
      "        [0.3534, 0.3656, 0.2810],\n",
      "        [0.3442, 0.3223, 0.3335],\n",
      "        [0.3053, 0.3647, 0.3300],\n",
      "        [0.3527, 0.3409, 0.3065],\n",
      "        [0.3307, 0.3414, 0.3279],\n",
      "        [0.2988, 0.3821, 0.3191],\n",
      "        [0.3380, 0.3193, 0.3427],\n",
      "        [0.3074, 0.3071, 0.3855],\n",
      "        [0.3462, 0.3755, 0.2783]])\n"
     ]
    }
   ],
   "source": [
    "# exp_Y = torch.exp(Y)\n",
    "# P = exp_Y / exp_Y.sum(axis=1).view(-1, 1)\n",
    "P = torch.nn.functional.softmax(Y, dim=1)\n",
    "print('P: \\n', P)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# Logistic回归二分类"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 公式"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "可视为softmax多分类Y为1维的特殊情况：\n",
    "\n",
    "${\\rm Y_{*, 1}} = {\\rm X_{*, d1}}W_{d1, 1} + B_{*, 1}$\n",
    "\n",
     "${\\rm P_{*, 1}} = {\\rm sigmoid}({\\rm Y_{*, 1}}) = 1 ~/~ (1 + \\exp(-{\\rm Y_{*, 1}}))$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 样本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "X: \n",
      " tensor([[0.9624, 0.9218, 0.8559],\n",
      "        [0.7482, 1.0800, 0.7598],\n",
      "        [0.9364, 1.1365, 1.1469],\n",
      "        [1.1024, 0.9339, 1.0748],\n",
      "        [0.6506, 1.1814, 0.9761]])\n",
      "Y: \n",
      " tensor([[0],\n",
      "        [0],\n",
      "        [0],\n",
      "        [1],\n",
      "        [0]])\n"
     ]
    }
   ],
   "source": [
    "d1, d2 = 3, 1\n",
    "n = 5\n",
    "X = torch.normal(1.0, 0.2, (n, d1))\n",
    "print('X: \\n', X)\n",
    "Y = torch.randint(0, 2, (n, d2))\n",
    "print('Y: \\n', Y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* pytorch前向"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Y_: \n",
      " tensor([[6.4801],\n",
      "        [6.1760],\n",
      "        [7.4394],\n",
      "        [7.2221],\n",
      "        [6.6163]], grad_fn=<AddmmBackward0>)\n"
     ]
    }
   ],
   "source": [
    "lr = torch.nn.Linear(in_features=d1, out_features=d2, bias=True)\n",
    "lr.weight = torch.nn.Parameter(2.0 * torch.ones(d2, d1))\n",
    "lr.bias = torch.nn.Parameter(torch.ones(d2))\n",
    "Y_ = lr(X)\n",
    "print('Y_: \\n', Y_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "P_: \n",
      " tensor([[0.9985],\n",
      "        [0.9979],\n",
      "        [0.9994],\n",
      "        [0.9993],\n",
      "        [0.9987]], grad_fn=<SigmoidBackward0>)\n"
     ]
    }
   ],
   "source": [
    "# exp_Y_ = torch.exp(-1.0 * Y_)\n",
    "# P_ = 1 / (1 + exp_Y_)\n",
    "P_ = torch.nn.functional.sigmoid(Y_)\n",
    "print('P_: \\n', P_)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 多层感知机MLP"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 公式"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "多层感知机可以视为多个多元线性回归+激活函数的堆叠"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* pytorch前向"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MLP(torch.nn.Module):\n",
    "    def __init__(self, dim_in, dim_hiddens, dim_out):\n",
    "        super(MLP, self).__init__()\n",
    "        self.layers = torch.nn.ModuleList()\n",
    "        \n",
    "        # 隐藏层\n",
    "        last_size = dim_in\n",
    "        for size in dim_hiddens:\n",
    "            self.layers.append(torch.nn.Linear(last_size, size))\n",
    "            last_size = size\n",
    "        \n",
    "        # 输出层\n",
    "        self.output_layer = torch.nn.Linear(last_size, dim_out)\n",
    "\n",
    "    def forward(self, x):\n",
    "        for layer in self.layers:\n",
    "            x = torch.relu(x)\n",
    "            x = layer(x)\n",
    "        x = self.output_layer(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Y_: \n",
      " tensor([[ 0.6168, -0.8047],\n",
      "        [ 0.7257, -0.6539],\n",
      "        [ 1.1125, -0.6947],\n",
      "        [ 0.5671, -0.5762],\n",
      "        [ 0.6519, -0.6762],\n",
      "        [ 0.6818, -0.5978],\n",
      "        [ 1.0547, -0.8539],\n",
      "        [ 0.5593, -0.5961],\n",
      "        [ 0.6039, -0.5410],\n",
      "        [ 1.0108, -0.9002]], grad_fn=<AddmmBackward0>)\n"
     ]
    }
   ],
   "source": [
    "dim_in, dim_hiddens, dim_out = 20, (5, 3), 2\n",
    "n = 10\n",
    "X = torch.normal(2.0, 1.0, (n, dim_in))\n",
    "\n",
    "mdl = MLP(dim_in, dim_hiddens, dim_out)\n",
    "Y_ = mdl(X)\n",
    "print('Y_: \\n', Y_)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 常用激活函数及其特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Sigmoid:\n",
    "\n",
    "$f(x) = \\frac{1}{1+e^{-x}}$\n",
    "\n",
    "\n",
    "- 优点：\n",
    "    1. 平滑性好，$f^{'}(x) = f(x)(1-f(x))$\n",
    "    2. 值域为(0, 1)，概率解释友好\n",
    "- 缺点\n",
    "    1. 运算量大\n",
    "    2. 梯度消失\n",
    "    3. 均值非0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAbcAAAEmCAYAAADhrd4NAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAsNUlEQVR4nO3de1xUdcI/8M/MADPcZhSQARSRvFKkJF4Cc9MsjMzNXR+j3E0t9YnKXMXajcfdTNeWruZT5qW89Fiu+autfVlRSVtesVJCI8VbICAMIKgzXIRhZr6/P5CpkREZbmcun/frNS/jcA7z4SR++J7zPefIhBACREREbkQudQAiIqKuxnIjIiK3w3IjIiK3w3IjIiK3w3IjIiK3w3IjIiK3w3IjIiK3w3IjIiK34yV1gPawWCwoKytDYGAgZDKZ1HGIiEgCQgjU1NQgIiICcnnbYzOXKLeysjJERkZKHYOIiJxASUkJ+vXr1+Y6LlFugYGBAJq/IbVaLXEaIiKSgsFgQGRkpLUT2uIS5dZyKFKtVrPciIg8XHtOT3FCCRERuR2WGxERuR2WGxERuR2Hz7nt3bsXL7/8MnJycqDT6fDxxx9j2rRpbW6zZ88epKWl4dixY4iIiMCf//xnpKamdjSzXUIImEwmmM3mLv26zkChUMDLy4uXQRARtZPD5VZXV4cRI0bg4YcfxvTp06+7fmFhIe655x7Mnz8f7733Hg4cOIDHH38cffr0adf27WE0GqHT6VBfX98lX88Z+fn5ITw8HD4+PlJHISJyeg6XW3JyMpKTk9u9/vr169G/f3+sXr0aABATE4PDhw/jlVde6ZJys1gsKCwshEKhQEREBHx8fNxqhCOEgNFoxPnz51FYWIjBgwdf9+JFIiJP1+2XAhw8eBBJSUk2yyZPnoxNmzahqakJ3t7erbZpbGxEY2Oj9WODwXDNr280GmGxWBAZGQk/P7+uC+5EfH194e3tjaKiIhiNRqhUKqkjEXWIEAL6y02oqjWiurYRtY0m1BvNuGw0o874y3/XG81oMltgslhgMguYLQImS/OfTWaLzccmiwUW0fIGgICAENYPIYSA9dMC1v/GleW/rNu83dXbUteJCVfjtZS4Hnmvbi+38vJyaLVam2VarRYmkwlVVVUIDw9vtU1GRgaWL1/u0Pu4+2jG3b8/ch+XjWYcK9PjTGUtzlbXo6i6DsUX6lFV24jqWiNMFhaGp/LzUfTYe/XIRdxXHyZs+W3oWocP09PTkZaWZv245ap0InI+tY0m7D11HvtOn8eREj1OVdTAfJ0CC1R5IdjfB2pfb/h6K+Dno4Cf0gt+V/7b18cLSi85vOQyKBSy5j/lcngrZFDIf/nYSy6DXC6DXAbI0PzviUwGyK78iVbLrnzcsuxX2+FX67T8y2Tzeeq0QFXP3Tek298pLCwM5eXlNssqKyvh5eWF4OBgu9solUoolcrujkZEHVRvNOHTozp8/pMOB36uhtFksfl8aKASMeFqRIf4IyrYD/2D/KBVqxAc4IMgfx8ovXruN3jyTN1ebgkJCfjkk09slu3atQujRo2ye76NiJzX+ZpGvL2vANu/L0ZNg8m6PCrYD3fFaDFqQBDiInshTMPzwiQth8uttrYWZ86csX5cWFiII0eOICgoCP3790d6ejpKS0uxdetWAEBqairWrFmDtLQ0zJ8/HwcPHsSmTZuwffv2rvsuiKhbNTSZ8eY3Z/D2vgI0NDWP0qKC/TAjvh+SbgrD4NAAt5qlTK7P4XI7fPgwJk6caP245dzY7Nmz8c4770Cn06G4uNj6+ejoaGRmZmLx4sV48803ERERgddff73LrnEjou71bUE1nv7wKEouXAYAxEX2wsJJgzBhSCjkchYaOSeHy23ChAltTo995513Wi27/fbb8cMPPzj6Vh0ihMDlJmnuUuLrrWj3b69bt27F4sWLUVZWZnN+cfr06fD397eOfImkYrEIvP71
abz+n9OwCCBco8Kz996Iu2PDOEojp+cSj7xxxOUmM2589ktJ3vv4isnw82nfLp0xYwYWLlyInTt3YsaMGQCAqqoqfPrpp/jiiy+6MybRdTU0mbHkg6P47EcdAOC/4vvhud/ehACl2/2TQW6KF09JxNfXFzNnzsSWLVusy7Zt24Z+/fphwoQJ0gUjj3fZaMacLd/jsx918FbI8PJ/DccrM0aw2MiluN3fVl9vBY6vmCzZezti/vz5GD16NEpLS9G3b19s2bIFc+bM4SEfkozRZMFj23LwbcEFBCi9sOGheIwbFCJ1LCKHuV25yWSydh8alNott9yCESNGYOvWrZg8eTLy8vJaXTZB1FOEEPjzh0ex++R5+Hor8M7DozFqQJDUsYg6xDVawI3NmzcPr732GkpLS3HnnXfyTiwkmS0HzuLfR8rgJZdhw0PxLDZyaTznJrE//OEPKC0txdtvv41HHnlE6jjkoQ6fvYB/ZOYDAJZOicFvhvSROBFR57DcJKZWqzF9+nQEBARc96GvRN2h3mhC2v87CpNF4L64CMxJHCB1JKJOY7k5AZ1Ohz/84Q+8nyZJ4pUvT6H4Qj0iNCqsnBbLCU3kFnjOTUIXLlzArl278PXXX2PNmjVSxyEPdKTkErZkFwIA/vH7mxGo4v1eyT2w3CQ0cuRIXLx4ES+++CKGDh0qdRzyMEIIPP/ZcQgB/O6WvpgwNFTqSERdhuUmobNnz0odgTzYl8cqcOjsRai85fjL3cOkjkPUpXjOjcgDNZktePGLEwCA/x5/Ax9RQ27HbcqtrZs5uwN3//6oZ336YxkKq+oQ7O+D/759oNRxiLqcy5dbywNP6+vrJU7SvVq+Pz7glTrLYhFYt/tnAMAjt0XznpHkllz+b7VCoUCvXr1QWVkJAPDz83OrqcxCCNTX16OyshK9evWCQuHY/SuJrvb1iUqcqqhFgNILf7w1Suo4RN3C5csNAMLCwgDAWnDuqFevXtbvk6gzNuxtHrX98dYoaHx5JIDck1uUm0wmQ3h4OEJDQ9HU1CR1nC7n7e3NERt1iRPlBhw6exFechkeGTdA6jhE3cYtyq2FQqFgCRC14Z/fFQMA7rpRi1A1Z0iS+3L5CSVE1D71RhM+/qEUADBzbH+J0xB1L5YbkYf49KgONY0m9A/yw7iBfAApuTeWG5GH+PCHcwCAB8ZEQi53nxnFRPaw3Ig8gE5/GYfOXgAATIvrK3Eaou7HciPyAJ8e1UEIYPSA3ojo5St1HKJux3Ij8gCf/FgGAPjtiAiJkxD1DJYbkZsrrKrDj+f0UMhlSL45XOo4RD2C5Ubk5j7/SQcASBwYjJAAPu2dPAPLjcjNfXW8AgCQdBNv30aeg+VG5MaqahuRW3IJAHBnDJ+0TZ6D5Ubkxr45UQkhgJsi1AjXcJYkeQ6WG5Eb+09+85MyJsVoJU5C1LNYbkRuqqHJjL2nzwMA7mK5kYfpULmtXbsW0dHRUKlUiI+Px759+9pcf9u2bRgxYgT8/PwQHh6Ohx9+GNXV1R0KTETtk1N0EfVGM0IDlYjtq5Y6DlGPcrjcduzYgUWLFmHp0qXIzc3F+PHjkZycjOLiYrvr79+/H7NmzcLcuXNx7NgxfPDBBzh06BDmzZvX6fBEdG37z1QBAG4bHOJWT6cnag+Hy23VqlWYO3cu5s2bh5iYGKxevRqRkZFYt26d3fW//fZbDBgwAAsXLkR0dDRuu+02PProozh8+HCnwxPRtWW3lNsgPgGAPI9D5WY0GpGTk4OkpCSb5UlJScjOzra7TWJiIs6dO4fMzEwIIVBRUYEPP/wQU6ZMueb7NDY2wmAw2LyIqP309U34sVQPABjHciMP5FC5VVVVwWw2Q6u1PTmt1WpRXl5ud5vExERs27YNKSkp8PHxQVhYGHr16oU33njjmu+TkZEBjUZjfUVGRjoSk8jjHSyoghDA4NAAaPnEbfJAHZpQcvXxeyHENY/pHz9+HAsXLsSzzz6LnJwcfPHF
FygsLERqauo1v356ejr0er31VVJS0pGYRB6r5XwbR23kqbwcWTkkJAQKhaLVKK2ysrLVaK5FRkYGxo0bh6effhoAMHz4cPj7+2P8+PFYuXIlwsNb38hVqVRCqeQ98Ig66sCZ5tnIPN9GnsqhkZuPjw/i4+ORlZVlszwrKwuJiYl2t6mvr4dcbvs2CoUCQPOIj4i6VqWhAYVVdZDJgDE3BEkdh0gSDh+WTEtLw8aNG7F582bk5+dj8eLFKC4uth5mTE9Px6xZs6zrT506FR999BHWrVuHgoICHDhwAAsXLsSYMWMQEcFnSxF1tUNnLwIAYsLUUKu8JU5DJA2HDksCQEpKCqqrq7FixQrodDrExsYiMzMTUVFRAACdTmdzzducOXNQU1ODNWvWYMmSJejVqxfuuOMOvPjii133XRCR1aGzFwA0P3WbyFPJhAscGzQYDNBoNNDr9VCreacForZMeX0fjpUZsGbmLbh3OI+OkPtwpAt4b0kiN1LT0IR8XfN1oaMH8HwbeS6WG5Eb+aH4EiwC6B/kx+vbyKOx3IjcyKHC5vNto3i+jTwcy43IjRwuulJuUTwkSZ6N5UbkJswWgbxzzfeTHBnVS9owRBJjuRG5iTOVtagzmuHvo8Dg0ECp4xBJiuVG5CaOlDRfvH1zPw0Ucj6/jTwby43ITRwpuQQAiIvkZBIilhuRm8gtvgQAiIvUSBuEyAmw3IjcQL3RhFMVNQA4ciMCWG5EbiHvnB4WAYSpVQjT8OJtIpYbkRs4eu4SACAuspekOYicBcuNyA3klTbfT3I4z7cRAWC5EbmFY6XNF2/HRrDciACWG5HLq2loQkFVHQDgpgg+EooIYLkRubzjZc2HJCM0KgQHKCVOQ+QcWG5ELu6nK+UW25eHJIlasNyIXNxPLefbWG5EViw3Ihf3S7nxfBtRC5YbkQurN5rw8/laAJwpSfRrLDciF5avq4FFAH0ClQhV884kRC1YbkQu7LiueTIJLwEgssVyI3Jh+VfKLSac5Ub0ayw3IhfGciOyj+VG5KIsFoGT5c2PubkxPFDiNETOheVG5KKKLtSj3miG0kuOAcH+UschciosNyIX1XJIcmhYILwU/FEm+jX+RBC5KOv5tjCebyO6GsuNyEX9MpmE59uIrsZyI3JR+brmySScKUnUGsuNyAUZGppQeukyAGAYy42olQ6V29q1axEdHQ2VSoX4+Hjs27evzfUbGxuxdOlSREVFQalUYuDAgdi8eXOHAhMRcLqiedQWrlFB4+stcRoi5+Pl6AY7duzAokWLsHbtWowbNw4bNmxAcnIyjh8/jv79+9vd5v7770dFRQU2bdqEQYMGobKyEiaTqdPhiTzVyfLmmyUP0fJ8G5E9DpfbqlWrMHfuXMybNw8AsHr1anz55ZdYt24dMjIyWq3/xRdfYM+ePSgoKEBQUBAAYMCAAZ1LTeThTl0ZuQ3RBkichMg5OXRY0mg0IicnB0lJSTbLk5KSkJ2dbXebnTt3YtSoUXjppZfQt29fDBkyBE899RQuX758zfdpbGyEwWCweRHRL34pN47ciOxxaORWVVUFs9kMrVZrs1yr1aK8vNzuNgUFBdi/fz9UKhU+/vhjVFVV4fHHH8eFCxeued4tIyMDy5cvdyQakUdpKbehYSw3Ins6NKFEJpPZfCyEaLWshcVigUwmw7Zt2zBmzBjcc889WLVqFd55551rjt7S09Oh1+utr5KSko7EJHJL1bWNqKo1AgAGhfKwJJE9Do3cQkJCoFAoWo3SKisrW43mWoSHh6Nv377QaH55SnBMTAyEEDh37hwGDx7cahulUgmlUulINCKPcaqieTJJ/yA/+Pk4fNqcyCM4NHLz8fFBfHw8srKybJZnZWUhMTHR7jbjxo1DWVkZamtrrctOnToFuVyOfv36dSAykWfjZBKi63P4sGRaWho2btyIzZs3Iz8/H4sXL0ZxcTFSU1MBNB9SnDVrlnX9mTNnIjg4GA8//DCOHz+O
vXv34umnn8YjjzwCX1/frvtOiDwEJ5MQXZ/DxzRSUlJQXV2NFStWQKfTITY2FpmZmYiKigIA6HQ6FBcXW9cPCAhAVlYWnnzySYwaNQrBwcG4//77sXLlyq77Log8CCeTEF2fTAghpA5xPQaDARqNBnq9Hmo1bzVEnksIgRHLd8HQYELmwvG4MYI/D+Q5HOkC3luSyIVU1jTC0GCCQi7DDX34gFKia2G5EbmQk+XNhySjgv2g8lZInIbIebHciFyI9XwbJ5MQtYnlRuRCOFOSqH1YbkQu5GQFnwZA1B4sNyIXYbEI63PchobxAm6itrDciFxE6aXLqDea4aOQIyqYMyWJ2sJyI3IRLefbbujjD28Ff3SJ2sKfECIXcYrn24jajeVG5CJ4w2Si9mO5EbmIlgu4OXIjuj6WG5ELMFsEzpxvPizJGyYTXR/LjcgFFFXXwWiyQOUtR2RvP6njEDk9lhuRC2g53zY4NBByuUziNETOj+VG5AJaZkoO5mQSonZhuRG5gJO8YTKRQ1huRC6g5bZbQziZhKhdWG5ETs5osqDgfB0AXgZA1F4sNyInV1hVB5NFIEDphQiNSuo4RC6B5Ubk5KwzJbUBkMk4U5KoPVhuRE6OT98mchzLjcjJ8bZbRI5juRE5udOVfBoAkaNYbkROrKHJjLPVV2ZK8unbRO3GciNyYmcqayEE0NvPG30ClFLHIXIZLDciJ/bLTMlAzpQkcgDLjciJ8bZbRB3DciNyYqcrWiaT8HwbkSNYbkROjJcBEHUMy43ISdU2mlB66TIAlhuRo1huRE6qZdTWJ1CJ3v4+Eqchci0dKre1a9ciOjoaKpUK8fHx2LdvX7u2O3DgALy8vBAXF9eRtyXyKCfKDQCAmHC1xEmIXI/D5bZjxw4sWrQIS5cuRW5uLsaPH4/k5GQUFxe3uZ1er8esWbMwadKkDocl8iQtI7cYPsONyGEOl9uqVaswd+5czJs3DzExMVi9ejUiIyOxbt26Nrd79NFHMXPmTCQkJHQ4LJEnOaFrLrdh4Sw3Ikc5VG5GoxE5OTlISkqyWZ6UlITs7Oxrbrdlyxb8/PPPWLZsWbvep7GxEQaDweZF5EmEEMi/clhyWBgPSxI5yqFyq6qqgtlshlartVmu1WpRXl5ud5vTp0/jmWeewbZt2+Dl5dWu98nIyIBGo7G+IiMjHYlJ5PLK9A2oaTDBSy7DwD68xo3IUR2aUHL1bYCEEHZvDWQ2mzFz5kwsX74cQ4YMaffXT09Ph16vt75KSko6EpPIZZ3QNY/aBvYJgI8XJzUTOap9Q6krQkJCoFAoWo3SKisrW43mAKCmpgaHDx9Gbm4uFixYAACwWCwQQsDLywu7du3CHXfc0Wo7pVIJpZI3iSXPdaKc59uIOsOhXwl9fHwQHx+PrKwsm+VZWVlITExstb5arUZeXh6OHDlifaWmpmLo0KE4cuQIxo4d27n0RG7KWm4830bUIQ6N3AAgLS0NDz30EEaNGoWEhAS89dZbKC4uRmpqKoDmQ4qlpaXYunUr5HI5YmNjbbYPDQ2FSqVqtZyIftFyWJIjN6KOcbjcUlJSUF1djRUrVkCn0yE2NhaZmZmIiooCAOh0uute80ZE19bQZEZBVfMDSmM4ciPqEJkQQkgd4noMBgM0Gg30ej3Uav6wk3v7qVSPe9/YD42vN448exef40Z0hSNdwGlYRE7mpPV8Gx9QStRRLDciJ8N7ShJ1HsuNyMmc+NXIjYg6huVG5GTyrfeU5MiNqKNYbkROpKq2EVW1jZDJgCFa3naLqKNYbkROpGUySVSQH/x8HL5Sh4iuYLkROZF8HZ8EQNQVWG5ETqTlfNtQTiYh6hSWG5ETOVamBwDE9tVInITItbHciJxEQ5MZpytrAQCxfXlYkqgzWG5ETuJEeQ3MFoFgfx+EqVVSxyFyaSw3IieRV9p8SPKmvhredouok1huRE7i2JVyu5mHJIk6jeVG5CR+aplMEsHJJESdxXIjcgKN
JrP1Am7OlCTqPJYbkRM4XVGLJrOAxtcb/Xr7Sh2HyOWx3IicgHUySYSak0mIugDLjcgJHC25BAAYEdlL0hxE7oLlRuQEjrSUW79ekuYgchcsNyKJ1TWacKqieTLJLf17SRuGyE2w3Igklleqh0UA4RoVtLwzCVGXYLkRSYyHJIm6HsuNSGJHii8BAOJ4SJKoy7DciCTWMnKL40xJoi7DciOSULm+AeWGBshlwM28MwlRl2G5EUkop+giAGBomBr+Si+J0xC5D5YbkYQOnb0AABgzoLfESYjcC8uNSEIt5TZqQJDESYjcC8uNSCI1DU3I1xkAAKNZbkRdiuVGJJEfii/BIoDIIF+EaXjxNlFXYrkRSeRQYfMhSY7aiLpeh8pt7dq1iI6OhkqlQnx8PPbt23fNdT/66CPcdddd6NOnD9RqNRISEvDll192ODCRu/hlMgnLjairOVxuO3bswKJFi7B06VLk5uZi/PjxSE5ORnFxsd319+7di7vuuguZmZnIycnBxIkTMXXqVOTm5nY6PJGramgyWy/e5mQSoq4nE0IIRzYYO3YsRo4ciXXr1lmXxcTEYNq0acjIyGjX17jpppuQkpKCZ599tl3rGwwGaDQa6PV6qNVqR+ISOaXsM1WYufE7aNVKfJs+iQ8oJWoHR7rAoZGb0WhETk4OkpKSbJYnJSUhOzu7XV/DYrGgpqYGQUHX/m21sbERBoPB5kXkTvadqQIAjBsUwmIj6gYOlVtVVRXMZjO0Wq3Ncq1Wi/Ly8nZ9jVdffRV1dXW4//77r7lORkYGNBqN9RUZGelITCKnd6Cl3AaGSJyEyD11aELJ1b9pCiHa9dvn9u3b8dxzz2HHjh0IDQ295nrp6enQ6/XWV0lJSUdiEjmlS/VG5JXqATSP3Iio6zl0M7uQkBAoFIpWo7TKyspWo7mr7dixA3PnzsUHH3yAO++8s811lUollEqlI9GIXMbBn6shBDAoNIDXtxF1E4dGbj4+PoiPj0dWVpbN8qysLCQmJl5zu+3bt2POnDn45z//iSlTpnQsKZGb2H/lkORtHLURdRuHb0OelpaGhx56CKNGjUJCQgLeeustFBcXIzU1FUDzIcXS0lJs3boVQHOxzZo1C//7v/+LW2+91Trq8/X1hUbDR3yQZxFCYPfJ8wBYbkTdyeFyS0lJQXV1NVasWAGdTofY2FhkZmYiKioKAKDT6WyueduwYQNMJhOeeOIJPPHEE9bls2fPxjvvvNP574DIhZwor0HppctQesl5vo2oGzl8nZsUeJ0buYs1X5/GK7tOYdKwUGyaM1rqOEQupduucyOizsnKrwQATIppewIWEXUOy42oh1TWNODolVtuTYq59qUwRNR5LDeiHvLNieZR2819NdCqeQkAUXdiuRH1kE9/1AEAkm7kIUmi7sZyI+oBVbWNyP65GgAwdUSExGmI3B/LjagHfJ6ng9kiMLyfBgNC/KWOQ+T2WG5EPWDn0TIAwNThHLUR9QSWG1E3K710GYfOXoRMBtw7IlzqOEQegeVG1M0+ONz8VIsxA4IQrvGVOA2RZ2C5EXUjk9mCHYeay23m2P4SpyHyHCw3om60++R56PQN6O3njbtjw6SOQ+QxWG5E3eif3zffRPy/4vtB6aWQOA2R52C5EXWTwqo6fHOy+a4kD47hIUminsRyI+omb+39GUIAk4aF4oY+AVLHIfIoLDeiblBhaMC/ckoBAI9NGChxGiLPw3Ij6gYb9xXAaLZg9IDeGDUgSOo4RB6H5UbUxXT6y9h6sAgA8PiEQRKnIfJMLDeiLvbKl6fQaLJgzIAgTBjaR+o4RB6J5UbUhX4q1eOj3HMAgKVTYiCTySROROSZWG5EXcRktmDpv3+CEMB9cREYEdlL6khEHovlRtRFNu0vxNGSSwhUeSE9OUbqOEQejeVG1AVOV9Tg1axTAIC/TbkRYRqVxImIPBvLjaiTahqa8Oh7OTCaLBg/OAQzRvWTOhKRx2O5EXWCxSLw
9Ac/ouB8HcLUKryWEsdJJEROgOVG1EFCCDz3yTF8cawc3goZ1v5xJEIClFLHIiKw3Ig6RAiBF744ga0HiyCTAa/MGIGR/XtLHYuIrvCSOgCRq2kyW5D+UR4+zGm+nm3ltFjcF9dX4lRE9GssNyIHlF26jEXvH8H3Zy9AIZdh5bRYPs6GyAmx3IjaQQiBf/1QipWfHcel+iYEKL3w+oNxuGOYVupoRGQHy43oOr4tqMYrX57E4aKLAIDh/TR448FbEBXsL3EyIroWlhuRHY0mM748VoF3D57FobPNpebrrcCf7hyMR8ZFw8eLc7GInFmHfkLXrl2L6OhoqFQqxMfHY9++fW2uv2fPHsTHx0OlUuGGG27A+vXrOxSWqDudr2nEJ0fL8NQHRzFq5VdYuD0Xh85ehLdChj/e2h9fP3U7Um8fyGIjcgEOj9x27NiBRYsWYe3atRg3bhw2bNiA5ORkHD9+HP37tz6xXlhYiHvuuQfz58/He++9hwMHDuDxxx9Hnz59MH369C75JogcIYRAVa0RpytqcKqiBicranD47EWcrqy1WU+rVuKB0f3x4Jj+vJ0WkYuRCSGEIxuMHTsWI0eOxLp166zLYmJiMG3aNGRkZLRa/y9/+Qt27tyJ/Px867LU1FQcPXoUBw8ebNd7GgwGaDQa6PV6qNVqR+KSGxFCwGwRMFkEjGYLTGYBk9mCJotAQ5MZtQ0m1DWaUNPY/GftldfFOiPKDY2o0Deg3NCACkMDGk0Wu+9xY7gaCQODkXSjFqMHBEEu591GiJyFI13g0MjNaDQiJycHzzzzjM3ypKQkZGdn293m4MGDSEpKslk2efJkbNq0CU1NTfD29m61TWNjIxobG60fGwwGR2Latebr0/jkqM5mmYBtr9ureXvNb+/3gVZLuvBr2c9lZzt767XjV5duz9CuTPa/VpPZgiazgMnS/GdXkcmA/kF+GKINxFBtIGL7qjE2Ohi9/X267D2ISDoOlVtVVRXMZjO0Wtvpz1qtFuXl5Xa3KS8vt7u+yWRCVVUVwsPDW22TkZGB5cuXOxLtuioMjThZUdOlX5OkJ5cBXgo5lF5yBCq9EKDygr/SCwFXXv5KL2h8vRGmVkGrUSFM3fwKVSuh8lZIHZ+IukmHZktefWNYIUSbN4u1t7695S3S09ORlpZm/dhgMCAyMrIjUa1mJw7A3bFhtrnshrW3yHahvdhXL7L3vV29yN77t96so1/HznbXfa/2fa/Xy9Per9P6+2i9kpdCBh+FHF4KGbzkcngrZPBSyOEll8FbIYeChw2JyA6Hyi0kJAQKhaLVKK2ysrLV6KxFWFiY3fW9vLwQHBxsdxulUgmlsmtvQDsoNACDQgO69GsSEZFzcmhOs4+PD+Lj45GVlWWzPCsrC4mJiXa3SUhIaLX+rl27MGrUKLvn24iIiDrL4Qt20tLSsHHjRmzevBn5+flYvHgxiouLkZqaCqD5kOKsWbOs66empqKoqAhpaWnIz8/H5s2bsWnTJjz11FNd910QERH9isPn3FJSUlBdXY0VK1ZAp9MhNjYWmZmZiIqKAgDodDoUFxdb14+OjkZmZiYWL16MN998ExEREXj99dd5jRsREXUbh69zkwKvcyMiIke6gPcRIiIit8NyIyIit8NyIyIit+MSj7xpOS3YFbfhIiIi19TSAe2ZKuIS5VZT03zbrM7epYSIiFxfTU0NNBpNm+u4xGxJi8WCsrIyBAYGtnmbr7a03MKrpKTEJWZculpewPUyM2/3Yt7u5Yl5hRCoqalBREQE5PK2z6q5xMhNLpejX79+XfK11Gq1S/xFaOFqeQHXy8y83Yt5u5en5b3eiK0FJ5QQEZHbYbkREZHb8ZhyUyqVWLZsWZc/baC7uFpewPUyM2/3Yt7uxbxtc4kJJURERI7wmJEbERF5DpYbERG5HZYbERG5HZYbERG5Hbcvt7Nnz2Lu3LmIjo6Gr68vBg4ciGXLlsFoNNqsV1xcjKlTp8Lf3x8h
ISFYuHBhq3V6yvPPP4/ExET4+fmhV69edteRyWStXuvXr+/ZoFe0J68z7V97BgwY0Gp/PvPMM1LHslq7di2io6OhUqkQHx+Pffv2SR3Jrueee67VfgwLC5M6ltXevXsxdepUREREQCaT4d///rfN54UQeO655xAREQFfX19MmDABx44dkyYsrp93zpw5rfb3rbfeKk1YABkZGRg9ejQCAwMRGhqKadOm4eTJkzbr9NQ+dvtyO3HiBCwWCzZs2IBjx47htddew/r16/E///M/1nXMZjOmTJmCuro67N+/H++//z7+9a9/YcmSJZJkNhqNmDFjBh577LE219uyZQt0Op31NXv27B5KaOt6eZ1t/15Ly9PlW15//etfpY4EANixYwcWLVqEpUuXIjc3F+PHj0dycrLNE++dyU033WSzH/Py8qSOZFVXV4cRI0ZgzZo1dj//0ksvYdWqVVizZg0OHTqEsLAw3HXXXdb72/a06+UFgLvvvttmf2dmZvZgQlt79uzBE088gW+//RZZWVkwmUxISkpCXV2ddZ0e28fCA7300ksiOjra+nFmZqaQy+WitLTUumz79u1CqVQKvV4vRUQhhBBbtmwRGo3G7ucAiI8//rhH81zPtfI66/79taioKPHaa69JHcOuMWPGiNTUVJtlw4YNE88884xEia5t2bJlYsSIEVLHaJerf4YsFosICwsTL7zwgnVZQ0OD0Gg0Yv369RIktGXvZ3727NnivvvukyRPe1RWVgoAYs+ePUKInt3Hbj9ys0ev1yMoKMj68cGDBxEbG4uIiAjrssmTJ6OxsRE5OTlSRGyXBQsWICQkBKNHj8b69ethsVikjmSXq+zfF198EcHBwYiLi8Pzzz/vFIdNjUYjcnJykJSUZLM8KSkJ2dnZEqVq2+nTpxEREYHo6Gg88MADKCgokDpSuxQWFqK8vNxmXyuVStx+++1Ou68BYPfu3QgNDcWQIUMwf/58VFZWSh3JSq/XA4D139ue3McucePkrvTzzz/jjTfewKuvvmpdVl5eDq1Wa7Ne79694ePjg/Ly8p6O2C5///vfMWnSJPj6+uI///kPlixZgqqqKqc5lPZrrrB///SnP2HkyJHo3bs3vv/+e6Snp6OwsBAbN26UNFdVVRXMZnOr/afVap1m3/3a2LFjsXXrVgwZMgQVFRVYuXIlEhMTcezYMQQHB0sdr00t+9Pevi4qKpIi0nUlJydjxowZiIqKQmFhIf72t7/hjjvuQE5OjuR3LhFCIC0tDbfddhtiY2MB9Ow+dtmRm70T11e/Dh8+bLNNWVkZ7r77bsyYMQPz5s2z+Zy9R+kIITr8iJ2uyNuWv/71r0hISEBcXByWLFmCFStW4OWXX+6SrN2Rt7v3rz2OfA+LFy/G7bffjuHDh2PevHlYv349Nm3ahOrq6m7L54ir91N377uOSk5OxvTp03HzzTfjzjvvxGeffQYA+L//+z+Jk7Wfq+xrAEhJScGUKVMQGxuLqVOn4vPPP8epU6es+11KCxYswI8//ojt27e3+lxP7GOXHbktWLAADzzwQJvrDBgwwPrfZWVlmDhxIhISEvDWW2/ZrBcWFobvvvvOZtnFixfR1NTU6jeMnsrrqFtvvRUGgwEVFRVdkrkr8/bE/rWnM99Dy4yzM2fOSDriCAkJgUKhaDVKq6ys7NZ911X8/f1x88034/Tp01JHua6WWZ3l5eUIDw+3LneVfQ0A4eHhiIqKknx/P/nkk9i5cyf27t1r87iyntzHLltuISEhCAkJade6paWlmDhxIuLj47Fly5ZWD7lLSEjA888/D51OZ93hu3btglKpRHx8fI/n7Yjc3FyoVKprTsV3VFfm7Yn9a09nvofc3FwAsPkBlIKPjw/i4+ORlZWF3/3ud9blWVlZuO+++yRM1j6NjY3Iz8/H+PHjpY5yXdHR0QgLC0NWVhZuueUWAM3nPPfs2YMXX3xR4nTtU11djZKSEsn+3goh8OSTT+Ljjz/G
7t27ER0dbfP5Ht3HXTo9xQmVlpaKQYMGiTvuuEOcO3dO6HQ666uFyWQSsbGxYtKkSeKHH34QX331lejXr59YsGCBJJmLiopEbm6uWL58uQgICBC5ubkiNzdX1NTUCCGE2Llzp3jrrbdEXl6eOHPmjHj77beFWq0WCxcudMq8zrZ/r5adnS1WrVolcnNzRUFBgdixY4eIiIgQv/3tb6WOJoQQ4v333xfe3t5i06ZN4vjx42LRokXC399fnD17VuporSxZskTs3r1bFBQUiG+//Vbce++9IjAw0Gmy1tTUWP9+ArD+fy8qKhJCCPHCCy8IjUYjPvroI5GXlycefPBBER4eLgwGg9PlrampEUuWLBHZ2dmisLBQfPPNNyIhIUH07dtXsryPPfaY0Gg0Yvfu3Tb/1tbX11vX6al97PbltmXLFgHA7uvXioqKxJQpU4Svr68ICgoSCxYsEA0NDZJknj17tt2833zzjRBCiM8//1zExcWJgIAA4efnJ2JjY8Xq1atFU1OTU+YVwrn279VycnLE2LFjhUajESqVSgwdOlQsW7ZM1NXVSR3N6s033xRRUVHCx8dHjBw50jq12tmkpKSI8PBw4e3tLSIiIsTvf/97cezYMaljWX3zzTd2/67Onj1bCNE8VX3ZsmUiLCxMKJVK8Zvf/Ebk5eU5Zd76+nqRlJQk+vTpI7y9vUX//v3F7NmzRXFxsWR5r/Vv7ZYtW6zr9NQ+5iNviIjI7bjsbEkiIqJrYbkREZHbYbkREZHbYbkREZHbYbkREZHbYbkREZHbYbkREZHbYbkREZHbYbkREZHbYbkREZHbYbkRObHz588jLCwM//jHP6zLvvvuO/j4+GDXrl0SJiNybry3JJGTy8zMxLRp05CdnY1hw4bhlltuwZQpU7B69WqpoxE5LZYbkQt44okn8NVXX2H06NE4evQoDh06BJVKJXUsIqfFciNyAZcvX0ZsbCxKSkpw+PBhDB8+XOpIRE6N59yIXEBBQQHKyspgsVhQVFQkdRwip8eRG5GTMxqNGDNmDOLi4jBs2DCsWrUKeXl50Gq1UkcjclosNyIn9/TTT+PDDz/E0aNHERAQgIkTJyIwMBCffvqp1NGInBYPSxI5sd27d2P16tV49913oVarIZfL8e6772L//v1Yt26d1PGInBZHbkRE5HY4ciMiIrfDciMiIrfDciMiIrfDciMiIrfDciMiIrfDciMiIrfDciMiIrfDciMiIrfDciMiIrfDciMiIrfDciMiIrfDciMiIrfz/wG1Q/mG2vXZJAAAAABJRU5ErkJggg==",
      "text/plain": [
       "<Figure size 500x300 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "def sigmoid(x):\n",
    "    return 1 / (1 + np.exp(-x))\n",
    "\n",
    "df = pd.DataFrame({'x': np.linspace(-20, 20, 200)})\n",
    "df['y'] = df['x'].apply(sigmoid)\n",
    "_ = df.plot('x', 'y', figsize=(5, 3))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "[从ReLU到Sinc，26种神经网络激活函数可视化](https://mp.weixin.qq.com/s/7DgiXCNBS5vb07WIKTFYRQ)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 卷积神经网络CNN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 循环神经网络RNN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 门控循环单元GRU"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 长短时记忆网络LSTM"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 残差网络ResNet"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 生成对抗网络GAN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Tokenizer\n",
    "\n",
    "简单理解，tokenizer就是对词表进行编码和解码，把文本转化为数字编码\n",
    "\n",
    "TODO: 常用tokenizer算法原理学习（如：BPE）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "编码结果：\n",
      "input_ids, 18: [101, 7592, 1010, 2026, 2171, 2003, 5035, 2072, 1012, 100, 100, 1989, 1855, 100, 5035, 2072, 1636, 102]\n",
      "token_type_ids, 18: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "attention_mask, 18: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n",
      "\n",
      "解码结果：\n",
      "[CLS] hello, my name is kimi. [UNK] [UNK] ， 我 [UNK] kimi 。 [SEP]\n"
     ]
    }
   ],
   "source": [
    "# bert分词器\n",
    "from transformers import BertTokenizer\n",
    "\n",
    "# 加载预训练的分词器\n",
    "# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')  # 从hf网站下载\n",
    "tokenizer = BertTokenizer.from_pretrained(f'{ORI_MODELS_DIR}/bert-base-uncased')  # 本地加载\n",
    "\n",
    "# 使用分词器处理文本\n",
    "input_text = \"Hello, my name is Kimi. 你好，我是Kimi。\"\n",
    "encoded_input = tokenizer(input_text)\n",
    "print('编码结果：')\n",
    "for k, v in encoded_input.items():\n",
    "    print(f'{k}, {len(v)}:', v)\n",
    "    \n",
    "print('\\n解码结果：')\n",
    "print(tokenizer.decode(encoded_input['input_ids']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1855\n",
      "[CLS]hello,mynameiskim##i.[UNK][UNK]，我[UNK]kim##i。[SEP]"
     ]
    }
   ],
   "source": [
    "print(tokenizer.vocab['我'])\n",
    "id_token = {v: k for k, v in tokenizer.vocab.items()}\n",
    "for input_id in encoded_input['input_ids']:\n",
    "    print(id_token[input_id], end='')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "编码结果：\n",
      "input_ids, 31: [1, 15043, 29892, 590, 1024, 338, 476, 10233, 29889, 29871, 30919, 31076, 30214, 30672, 30392, 29968, 10233, 30267, 232, 150, 139, 232, 150, 139, 232, 155, 194, 232, 155, 194, 30267]\n",
      "attention_mask, 31: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n",
      "\n",
      "解码结果：\n",
      "<s> Hello, my name is Kimi. 你好，我是Kimi。哈哈嘿嘿。\n"
     ]
    }
   ],
   "source": [
    "# llama2-7b分词器\n",
    "from transformers import AutoTokenizer\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(f'{ORI_MODELS_DIR}/Llama-2-7b-hf')  # 本地加载\n",
    "\n",
    "# 使用分词器处理文本\n",
    "input_text = \"Hello, my name is Kimi. 你好，我是Kimi。哈哈嘿嘿。\"\n",
    "encoded_input = tokenizer(input_text)\n",
    "print('编码结果：')\n",
    "for k, v in encoded_input.items():\n",
    "    print(f'{k}, {len(v)}:', v)\n",
    "    \n",
    "print('\\n解码结果：')\n",
    "print(tokenizer.decode(encoded_input['input_ids']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "30672\n",
      "<s>▁Hello,▁my▁name▁is▁Kimi.▁你好，我是Kimi。<0xE5><0x93><0x88><0xE5><0x93><0x88><0xE5><0x98><0xBF><0xE5><0x98><0xBF>。"
     ]
    }
   ],
   "source": [
    "print(tokenizer.vocab['我'])\n",
    "id_token = {v: k for k, v in tokenizer.vocab.items()}\n",
    "for input_id in encoded_input['input_ids']:\n",
    "    print(id_token[input_id], end='')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "LlamaTokenizerFast(name_or_path='D:/Genlovy_Hoo/HooProjects/HooLLM/ori_models/Llama-2-7b-hf', vocab_size=32000, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>'}, clean_up_tokenization_spaces=False),  added_tokens_decoder={\n",
       "\t0: AddedToken(\"<unk>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t1: AddedToken(\"<s>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t2: AddedToken(\"</s>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "}"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Transformer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "vocab_size = tokenizer.vocab_size\n",
    "d_model = 512  # 模型维度或词向量维度\n",
    "nhead = 8\n",
    "seq_len = 200  # 样本（句子）长度\n",
    "n = 30"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Embedding\n",
    "\n",
    "embedding用于将非结构化数据编码为结构化数据（比如将token编码转化为词向量）\n",
    "\n",
    "TODO: 学习常用的embedding算法原理（[主流Embedding算法介绍](https://zhuanlan.zhihu.com/p/630061933)）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([31, 512])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[-1.5866,  1.0326, -1.4967,  ..., -2.1391, -0.7206,  2.4755],\n",
       "        [-0.6957, -0.8220, -0.5809,  ...,  0.5680,  0.0855, -0.4322],\n",
       "        [-0.4889,  0.7077,  1.0359,  ..., -1.0655, -0.4152,  0.0633],\n",
       "        ...,\n",
       "        [-0.1311, -0.9042,  0.5188,  ..., -0.1801,  0.7255, -0.9138],\n",
       "        [ 1.4697,  1.9553,  0.1550,  ...,  0.8665, -1.3479, -0.0456],\n",
       "        [-0.1036, -1.2250,  0.1981,  ..., -0.1204,  0.4708,  0.0042]],\n",
       "       grad_fn=<EmbeddingBackward0>)"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "embd = torch.nn.Embedding(vocab_size, d_model)\n",
    "\n",
    "e = embd(torch.tensor(encoded_input['input_ids']))\n",
    "# e = embd(torch.tensor([2, 5]))\n",
    "print(e.shape)\n",
    "e"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([32000, 512])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Parameter containing:\n",
       "tensor([[ 2.8368,  0.1544,  1.6064,  ..., -0.4845,  0.5296,  0.6206],\n",
       "        [-1.5866,  1.0326, -1.4967,  ..., -2.1391, -0.7206,  2.4755],\n",
       "        [ 0.6528, -0.5766,  1.3303,  ..., -0.1413, -0.4627, -0.0975],\n",
       "        ...,\n",
       "        [ 0.4808, -2.3994,  1.9958,  ...,  1.8550, -0.1452, -1.8287],\n",
       "        [ 2.0295,  0.2720,  0.8316,  ..., -0.2734,  1.2385, -1.1414],\n",
       "        [-0.8215,  0.0625,  0.1886,  ...,  0.2472,  1.3357, -0.6228]],\n",
       "       requires_grad=True)"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "print(embd.weight.shape)\n",
    "embd.weight"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Encoder"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([30, 200, 512]) torch.Size([30, 200, 512])\n",
      "odict_keys(['self_attn.in_proj_weight', 'self_attn.in_proj_bias', 'self_attn.out_proj.weight', 'self_attn.out_proj.bias', 'linear1.weight', 'linear1.bias', 'linear2.weight', 'linear2.bias', 'norm1.weight', 'norm1.bias', 'norm2.weight', 'norm2.bias'])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "TransformerEncoderLayer(\n",
       "  (self_attn): MultiheadAttention(\n",
       "    (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n",
       "  )\n",
       "  (linear1): Linear(in_features=512, out_features=2048, bias=True)\n",
       "  (dropout): Dropout(p=0.1, inplace=False)\n",
       "  (linear2): Linear(in_features=2048, out_features=512, bias=True)\n",
       "  (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "  (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "  (dropout1): Dropout(p=0.1, inplace=False)\n",
       "  (dropout2): Dropout(p=0.1, inplace=False)\n",
       ")"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "attn = torch.nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, batch_first=True)\n",
    "\n",
    "x = torch.randint(0, 1000, (n, seq_len, d_model)).float()\n",
    "y = attn(x)\n",
    "\n",
    "print(x.shape, y.shape)\n",
    "print(attn.state_dict().keys())\n",
    "attn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Transformer(\n",
       "  (encoder): TransformerEncoder(\n",
       "    (layers): ModuleList(\n",
       "      (0-5): 6 x TransformerEncoderLayer(\n",
       "        (self_attn): MultiheadAttention(\n",
       "          (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n",
       "        )\n",
       "        (linear1): Linear(in_features=512, out_features=2048, bias=True)\n",
       "        (dropout): Dropout(p=0.1, inplace=False)\n",
       "        (linear2): Linear(in_features=2048, out_features=512, bias=True)\n",
       "        (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "        (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "        (dropout1): Dropout(p=0.1, inplace=False)\n",
       "        (dropout2): Dropout(p=0.1, inplace=False)\n",
       "      )\n",
       "    )\n",
       "    (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "  )\n",
       "  (decoder): TransformerDecoder(\n",
       "    (layers): ModuleList(\n",
       "      (0-5): 6 x TransformerDecoderLayer(\n",
       "        (self_attn): MultiheadAttention(\n",
       "          (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n",
       "        )\n",
       "        (multihead_attn): MultiheadAttention(\n",
       "          (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n",
       "        )\n",
       "        (linear1): Linear(in_features=512, out_features=2048, bias=True)\n",
       "        (dropout): Dropout(p=0.1, inplace=False)\n",
       "        (linear2): Linear(in_features=2048, out_features=512, bias=True)\n",
       "        (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "        (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "        (norm3): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "        (dropout1): Dropout(p=0.1, inplace=False)\n",
       "        (dropout2): Dropout(p=0.1, inplace=False)\n",
       "        (dropout3): Dropout(p=0.1, inplace=False)\n",
       "      )\n",
       "    )\n",
       "    (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "transformer = torch.nn.Transformer(batch_first=True)\n",
    "transformer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# BERT"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# GPT"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# MoE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Dropout"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
