{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "38a72ebc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "\n",
    "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n",
    "batch_size = 8\n",
    "train_dataset = torchvision.datasets.cifar.CIFAR100(root='cifar100', train=False, transform=transform, download=True)\n",
    "test_dataset = torchvision.datasets.cifar.CIFAR100(root='cifar100', train=False, transform=transform, download=True)\n",
    "train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)\n",
    "test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "50090097",
   "metadata": {},
   "source": [
    "## 展示数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "b43b2efc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXAAAABOCAYAAAA5Hk1WAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAABriUlEQVR4nOz9WaxlWZrfh/3WuIcz3DnmISOzsqqyqnom2ewmm6Q4CjJMGpQsyDYIGTDQT34w4AcTfus3AQZs+MkAAduwBBs2AQuwbJGW2mRTFOmep6qurKysnCJjvBF3OuMe1uSHtc+JG1lZlVlDq9hWfIlA3nvPtM/ea3/rG/7//ydSSryyV/bKXtkr+7Nn8id9AK/slb2yV/bKfjh75cBf2St7Za/sz6i9cuCv7JW9slf2Z9ReOfBX9spe2Sv7M2qvHPgre2Wv7JX9GbVXDvyVvbJX9sr+jNqP5MCFEP+2EOLbQoj3hBD/8Md1UK/slb2yV/bKPtvED4sDF0Io4F3gbwEPgd8F/gcppbd/fIf3yl7ZK3tlr+x72Y8Sgf8F4L2U0gcppR74vwJ/78dzWK/slb2yV/bKPsv0j/Dam8CDS78/BH7xk08SQvwq8KsAxphfODw8/BE+8pW9slf2yv7bZ0+ePDlJKR198u8/igMXn/K376rHpJT+EfCPAG7cuJF+9Vd/9Uf4yFf2yl7ZK/tvn/3ar/3a/U/7+4/iwB8Cty/9fgt4/Hle+OD+ezx/9ghiwvmIDwmjBMZKtBVIHXHOIZKiSIo9Afds5OZewWyWOF51nKTEhZEEJCnKT91NjEkgIAIRgVABYwMxaIgSkkAISUya9dLhXOTo2nXuvP4mACF1FFWFMQVaa6SSkCJCCoQQeO9pmoblcoU2lvFojNEKJQRSQNs0tE1DPRpRVgXaGpRSKCURCJRSCClJJFJKKKXR2pBiIvgACGYXMx4/ekAInqoq2d3bZ2//gPsfPiKEQKEitfHEFEkpIgRICQJBApRSaG0QQpBiJMZISiCVREoBKW3/HmJEK0WC7d9iSsQEKaXtPyklSun8mhAJMT+ez3RCaUPwCecFQdUAfOWX/iJISYwRKSXWGkLfE7wnxAhCYIwmfwGNlAqAGAOkQAoeKRVJQBo+zygFKSFIpBRJBKwpQAi8CziX31sqRUxxe84BvPe4LmC05v4736JZLWlXFzx6/w+IArxLSCHQWmK1xIeISAJtJDFBCHmNSS3oXUAkwWRccuVoB6ktnQvsTkdMRjXjyZTJ/g2u3PoKSuVr8enxz/e2pmn4jd/4DQD+4l/8i+zv73/Xc2KMLFdLnj57wqPHD2naNVobtNFYaxnXY774xpepymo4hh+/ff3rX+fhw4f0fc9v//ZvE4YTpZTCWktd11hrcc7Rti3eewC01uzu7nJwcMB0OqWqKqSU1HVNWZYopUgp4ZxjuVyyXC6Htfz9e3ibxy8/TwiBMQaAZj6jX6+RgvwPECL7EyFEvkzSIqQcHss+AwQpJpIg/yzyZwhCvvM2nyckQipijMNaJX+QkCihEBKkFGyWhNQaIS1aCHxKdCl83+/3ozjw3wXeFELcAx4B/wHwP/w8Lzx5/ogPvvMNUoisG4/3iaKQTHYMo11FEB3LWQNRMw2awgiu7iW+Nh7x6Lln+WjNIxd5ViparUjRkM/k5mIBMlFXESETAfAIhPYUlSf6guQVJIkUmpgs81lgveoBtg48EVBaYCqNtdmJCxEHp6joup6AJ648wpQUdUFZWIySKAHgcf2autZMdmrKusIUGq0UIgmstWhrSCRCjCilKYsagcD1HoHiwcfw+OEHeN+CUEwmJVevHvDg4yfZgRvYG4EPgRAcCFAShBSQQCmDtXnDiCkRYyAlUCqhpIQUiNHjXd4EtNIkEjHm54aQ8DE77pji8J4KYy3eB5yLhJDI69MBCaNLeh8IUW4d+O0vfwllLTFGlMo3Zrda0bUtvcvnfTQeobQm
SYOQJl+B4IiuI/Qtyhikyk4++oAWghQiSpK/R3KURY3Uht552q7HOZ9vfganbwwpJfqup2scpS14cv9DmtWSvl3x+P0/xMdE8AktJaVVVKXGh0ToI/XIEKKgd4mYEspIXIikCN2kYhSP2NnfI3aOPtUovcvuvuD6gebLX/sS1Whv2Jx+MAd6cXGxdeBvvfUWd+7c+a7nLFYLjp89JclAkoHnp8+QQiK1xBhDPSq5desGd26/tnVgP2578uQJDx8+xDnH22+/Td/3pJTQWlNVFdPplPF4jHOOxWJB0zTEGDHGcPXqVfq+J4RASglrLZPJhLquKYoCIQQhBIQQrNdrvPfbDeKTjnyzQV128pv/X3bg7XLJ+vwMKcBIgR4el1K+CIRUgVIyO1ghQCiEUAQfs8eR2dHHlCAFJAlBHDYBBVqTcpTD8EeEUghpEFKglHjhxKNGqoQd1nn3Gef7h3bgKSUvhPifAv8FoID/Q0rpm5/ntUoJlM5fUakcBVormR4YDm4ZThcNYRGJwSNFYrdQvHGoeet2ol455seRi3XiXEaiBiElIanN3gcpIVXEWodQkUhCI4gyohTIFHJUGRJCgjaSHV0jhcBa9eLk6LxzeudBSGJKaJUvrtYGEBRFSVXVVKMarRVS5oshRUIbRVkWSCURQqClxChFCoGYEhKD1QqEwMVACqCEQGuD0QaBwhaWejwmLcP2lu/7frvDa62oRpq+EzgPEBEiIQSkHB7Q9y4vjktOwzlPVAJwxOAIMaCUJKSQowUYFlpCCogxocgLUCvQOgEpR/NCkiKEKIGISgmRIuLSPRWCx6oCpXS+QYQAIbZRVUqJESOsNriY/55IECO+dzTrNaYoKOsRxhhEgma1IvlIaTWCQPAdTQBT1QQpwWyi+Lzpeu8JOXzPm5UGoRPbYDQlRAjIPlJIQSEShYRCKqRVzNc9ldAkCSoFnA9IFJORxflEbDuOHzxlpy4w3vHwgzOWZzNi12OUZX7vIbYYI4z8ruvx/WyT+Xy/x4UQHB8/4uGjj/Gu4xd+9hf4+MF9lsslbdfS9x2rxYKvf+MPuXJ0Ba2nf2pROGQnWJYlKSW893nT7HuWyyVa6yEYyg65bVu6LruqEAKz2Yzd3V3G4zHr9Zqu6xiPx1RVRVVVTCYTLi4u8N7jvd866U87R5f/vvn58veOIeCdy9G3zOtSSHJ2ngSIBLFDhBdRckLmTbj3OXAUgBQIhow2/5QdvhQILxBIUn5jECCiREmDBJRUSJnxJKFJRLUkGEssKrDV9z3PP0oETkrpnwD/5Ad9XU7BVf4uQmALxc6hRo8js3bNyWmL0ALfJjoCXReJx55WtOxYuFokrojEk3XkGZKdSqBEohOKVkgCoHWEmB2TNFBaQRQJITVRAQmiACQk6bHjxESMqMYvTliMia7v84kXeXNo1h1Ga5TUuRRgLJPJlGo0wiiZF8BwoZVWjCejvEjXa6yWjOoCHzzBB4JzBKcx1mKEQBqNVpIUA51z9L1nujfl1p1bnJ4UeOeIKbFYzLeL0gdP0ziESBSFRmlJjAHnelzfExxA3nQ26d8mwoghlx5izE47xYQxGqWGZSEEMglC7wghDNFOohce3fshxdREmfApkmR+3MeWJjj68CLKkwJi8MQQc8bhc9Rtq4JApGs71ssVhTZEJDFFYkwQA0oI2tU6R1+bhe49UuUoyDuPSB7vejoXIHhkWeIj+fqHiBGCJEROhaWkUgoRPWU92pZVlISdSrJzqCmEoDKaylqsNTjvOe8TSge0scRKsVwsESJwsDdhNKmJMbFcNDy7/4hyVDItCpr5gm+/00KIHFz5Hcpqh8n+dfRwY4phE/s0Z3o5YvzezlYM7xE4Pz8jhsRPf/VnuXHjNm998WvbZ3nvOT8/4Tf+xX+ZM6ntaz/XLftD2aYEctnJhhBomoaqqrZReYyRvu9ZLBaEEFiv18xmM8bjMU3T4Jzj4OCAnZ0dQghYa7l79y7z+ZzT01Mu
Li62QccnnfgnnXeM8eVzGQN4h1CClBReaUTM6zWR0CQkERETQuUIXA6OWIoh2EnZqee31Rv3zVCLHEot2elsy5A6P18LjUwJEVMuecZNkJ5AfzbE+0dy4D+s5Qg215PLWnPvrUOuvqHp5Iqnx2cgwUlwAtYBTlq438PtdWL/juDQCm4WgmcBngRBSjC2kUIIVBI0AoxyhBDxKX9JrcFWAj+cVCEkUgmEEjl/EB3lxFLUL05JXdeElJBKIaWiLAqcEBit6boeKSXGWI6OxiSRiD7v5EoKJNkZoiSpj8TocX2LYor3npz1e5J3SKOJKZcCguvxIeYImbzBG2vYmU5xzg1lC//CgTtHs16ilECUJSkKIEeZUkhQCiX11lGE4PHRMxqNtuUeISDFhPMJW1iUkiQCMXhSTKzWLQ0tXvih5p3Pq5QSlCACSDC6wHtHSJEgcpthg1ONrifIXMYRQNd1VJNxrgUag+w9bdPSmgaPwsW8uYgUCH2Hd56yrrbfQ2vNxewMERKjskANjjnwIm3W1mKwhLYjASFGRAiIkMtGhZL44HOkD1gtubpr2BuX7FSW2haU1mKUxrmOHRNRyiK0po8wKyNGwtF+zdUrhxTWsF4sePz8nFkTkSpRWksfJN959z43rn+Do1tfphztoG0JiJcczPdy0p8VgQOcX5zinKOqR+zuHgxOZbPWBW3b8O773+H69dsUtvxTjb435r3fBmuXNypjDNZahBC5zzI43/V6zXK5xDlH0zRcXFywWq22v8/nc0ajEePxmKIocM69yBY/xS73bT7td4DSKESpUQIEEmMtiMAmk7VaowmkGJHyRZSeYkIZQVSKSCKSswetXC5Rao2UBoTCe7DG4EO+d713WGmo6wKrIkls1qBEJUtMCqETQf0b6sCF3NSYBAfXR/zMr9xh5zqs+xnFFM6fP+PpeXYe6wDPA3wo4OfQHLaCsQ7sm8hVAftJMCeijEBJmVO1kJsJISV8gCTBpITRarh4ArwgkZAmR+gRhRQOofylI03biA+yw5pMpjkFjwEhJNZairLAR4cnkIb6ndKKsiqJziFFbvBJoG/bnDYZjTEao3LTs3ceN3jCOByiLQp816G0GhyrIiHY29vj4cfPCSGXBkpbYHTOCDbfTwuJVOBTbtamlFgsFjw7Pma5XPClL32Z8mAfJRUpJNaN49HTE5qu49q1A/b2RhRWEkOPJmGVQgJeRESIRAbHHXMJwkqDFZooDb0MeJH7Dhvzrs/ZTso3AYALnk0EaY3Bty2rxQJZVCSRG4+x7+maNVoqjDa5mSwlwXvatmVUVDnQkQJTFaSY8Eptb1IpJUkpUsypa4i5OSwQuDhEPsN9IgXURlBbxagsGJUVdVFS2AJidtb1aAehNF3wTNsWozW74xFXDg+oixK/u4M1mkUT6EOkSZJ5l3i66vn4wSPunR+ze/Uutp6i1Mt16MtNYuClaPF7OVwx1KnarsOWJWVRURbl8BjbzLH3nov5nC++/iZab277YaH9Kdgm2v60+vMmMgcoimK7bpumIYSwrZtv6uGj0Yiqypv3crlksVhsN4Ac1Pjt5rBx6JsN43LU/WkNzUIrTGnyWfARqwJCRISISAHWghGSGF9kK4KUS3FKElIiIUmCDMaQuZwopCZJiY+5Ea+Ezv00oYgalIJSRqwWQ6AmUFJjhaQPHmMSSUeWn3Gef2IOXAyNtt2jEXe/fI3JnqTpx0gj+PA7Cx58MEcliAmikGAKikmJLEsqHDtdy/665SAFlkSEliiVMCmhY8KnhIvgU0JEAUkgJWgpEFLlpkOMKJ2wlcx19BCR6sWOPl8s0LbAmAIpFa53FKZAG00IuRShVK5fKSRJqmEhgZQKYzRRSaRIpBByWhZCboIONXElM2JFpAHaIHOKJpVCW4OPEa01USmIGREyHo+3N7RWakAV5LRuE9GkIQJXUhJjyu+fBDEkBmAHJEnfeubzJfcfPuHb7z8gpEjki2irECIjRXzw
G+hHjuxSbnIKmVE0AiCkXNaQCk1u0qaYLrmHAS0SM2plu9lsABlDKbHveipbIZXK7zmgVgQSrTRK5HPsnR/Ov8wR0FCCUgmEVIQkkCmnvMIYUsjooRc3dSK4SEgul2rIN6Yk5HWiJNZoiqKgrsZYrZFExrtH6KIkpMikWSKMZjoasbezw8gWpAFVsVytWXeORecR0jEvLcfHZ5w8fcjutXuocoq2NT5GjFIDoif3Rjb18ewA87Wczxff957KJUmRm88pDmn+ZUeWUUqj0XhYK3/6tlmHmxLQ5t+mpGKtpSgKmqbJDX2tcc5tG5MhBLz3PH/+nKIotpnXZhMwxmCM2a6ljfPffDa8HHV/aiaT4hCEZHiTTAFJRJLQUmBJaCJpQJ5lqIQYnHze9BnKcwBWGbyP9D7iBycvc3hPirnxnRLIJIneZ3CFkiitqIzBAEp4tIa1/Dc1AichREIq2Dmq2bsyYVRbRtESZc+VO08RfzBDB8WekNyUlpvjMaPrY8TOlGKqmKg1B+6EK2enPCFmR6JBh4SW0DgIaegxyA20LlHVBX1vSClADBibGE0M9ahmuZBo88LlnF/MqEdj6ipDi0gNWigmk8mwkDY3SEQOzjwNUCSl1LaEIlIguoQEZEooAZLcFFQAMZK8R2qNkgKhDSiNUoaySJTW4JvsiBOgN3A7cpdbKb2NdlIShLCBJYphgQFCURYVu7sHFMWIlBR9H+mbhsePn/Enf/IO33z3I0bTMUdXj9jdm6B1RfKOGMJw1bJzSDHlerk26AHV4dqOtvcUtiAJmRdrHL4fuTkryNFI8AFtDVqk/NyhhKGUwrUONWxgAghKMjIjuvUSOdTvE7kpqofNEwFBbDI7jVY6Q/siEIaIVoq8kZMdQ+wTIQp85zN6AICETAGRIkpKtFIYYymKilE9QglBvXdEPd5BKsVkeUGLZzKdMh2NqYxFhoBAUMzOqJsOvWoIqWO9p3n6bMnzJ/fZu/46ophi6l2czxDEGMNwftO2OSeFREhB8IHz8/NPvZc2h16VNTFEVqsVi9WCvZ29F48PjebSFDjXf2Y55sdlG8TIpoyyySy6rmO9Xg/Hl7aBkB1QSpt+y6YB+vz5c6y1AOzs7FBVVd5Y6zr3coagpWmaLTLleznuT373vEkMay4lUgwkkcENMnvdobwpkUPGkpuYEkS+pxGJJAQJhdAlIfSs+xYfIkVd5AhfKnoX6LpADBFdaZyL+ADWSqxWWCVRKeRyzlDZ/Sz7iTjwzUk2peLoVs26O6UPAmUi5Ujy+ld3+cPfOma0rPmbouaXi4JrO5rn10omXUd59y32Xx+zvnLCtX/xz3lXNCBylKlEBh9El3J0raCyUFhI0XHl2m3OzgLBzxHBU9eGazem3Lh9m/sfLAmd3R5ndiKCFBKhdeAiq5jrYuWoQmmVozqRyyNW2wE7HYfGp0SQHXAucAeS9xijsFqgJYjg8L5jPZ8xnu6ihugzDSULKySlsazIjTttDd6/QKE455jP+9xJHxqVkDeWGPwl3GpCa0tVjVmvHI8ePefsdI5IidW6wxRjynqKVAXr1rNY9YxH+VikkRit8T4ieo+UEZEkZVWhjSHEQItApI4QckUwN8pebIYhBFwbaNt8k43FBBUjgYSRuUQkQ8Q3HTFGtCBH1KKmLizHyzkxhOxYjaZdr3NqjEAZTVSRPgVqU6GERrqMbvFNQ+8du7s7OOe2+PeizGUnhggfcrReKkmpzTa6M1qiJBhTMLp2FzsaU+/sY+sx42bJs+cPKKoJtp5S2hI9YBBCDKS0YBQgoEiVpF2uWc+ec/z4Pt5M2LmiMdrw/ocfYo3m4OCA0WjE7OKChw8fcvv2bfb39wk+cHp2+j3vp5QS08kORVHy/OQ5Hz+4v3Xgm7VQWMv+7i7vvffugELR29f+adXDN6WNT0bJzjmUUrRti5Ry+7yiKF6CBW4i9r7vOT093Tr3XMqcbB35JipXSm3LMN+r/v3JJqbSAmFy
oFNog9WCFD2KzJXIJdQBPTJkMj7lLqcQOSMNMeJSJGDpCfQpgZZIrVAml/2MLVm1js7nerowBlMYjApYLShUQvgehnIl0iBRG2T097SfkANPFHXBGz9znWJf8M//P7/N3df32b9qUXXAjODo+i5/Y3WDv9sa9kPDP5WnfOdu4u+eKr760R8xYcxNap7sThh1a9oAQia0jJgiIR0YLTAqYQ0UGmwB08mI+fkMozxVmbhxY49f/KW/ymhyyGL2TS5OXux7V65fI/lEDBkRIWKkWa+BxCROKEcZ162EIiIw1iDJizSncCKnh0phZIbYLZdLYnDs6TF+gOyFmDeaZjnH+YgqR+iyolRq64R9CPTeU9QVxpqtb0wJok84FyispihK9HDDQEQqgTWW5bJlPp9x8vyMxbzh+OkpWksmkxG2KBhP9hlP1zw/O+Fb736Ajz22eJOjwxERh7aGotQQwPUelRTWWProiTETYuxI03eetu/5ZJBXTqZIoxGrFev1mnq6S1mN6TcRk4Rqd4quSlzXowqJMpoQFV3fs1qsmM3mHF67wmR3h9XFHKstdTXFJY/zHu8jTYhIPAKJCAoZIjYITIgURuBDQ9/39J0HtUdVZ/gogBKSqiwZl5bCGAqtKJSm0BprCqa7h5TjCbaeoIsKayt819D1LdFFYqGQtqScSsrW0beRpulJ3tGcz6gMxOiRUlBVNTu7e2gpuHP7DjFFyrLEGsPu3j7GFsPmmOi6jq79LEQw3L3zGkIIPnrwEa/ducfOdOdFhF7VvPXWV/kn//Q/48nTR1y7doO6Gv3I9/JnWYxxCxPcbJRKKVarFX3fb/92uTSihpISDCg1a7fllg0J6OTkhBACVVW9KGMqhdZ6W6K5bN8rCtdKgGGoKW7IOxkKkoTYlvZyUMJAbItMJhVloQhB0HSOtfMUpkDqAlukjM5K0PYd66bBFhXjyZjxuEZLUAwwYyUHpAvEmH1Pn2TGmX+OUtdPxIFfuTHl9pdf5xf/ztf4ztN3WN5fkeQ4d+alYj5f8fxsxWhq6X3ig67lXy1XxM7w0bjg9klk1DeU0nEgVtRR0qcc0RoLwmparek6h5CQZMIJgYolF89nRLfEGMd4p+TGrevcu/fTLJanFFblCzrYqB4hkQQf6DpH1zSkGJC9wDSamAKyzUSJyWQHlEBLjRpqeZHEbLZgVJZMRjVlXQ/peqDrHd47XN+TUmQynpCIiBQheuKwCLu+p/ceWxRMyIiUywswkcsHSIk0mqQkUZAbgCGgk2SxnPHhBx/x7PiUrnOM6inGGpTOpYbWrWk7j1UCqwTNfMnDjx5jhcT81JuMxpp16rEatMy4dQT0oafzjj4EArnu6kXEiYhPgRDFNg0USqNMQVULTFEilM74+hiRA5w0xIzZXy4WBO8pqgo1sFfHozEXswva9RqhJM55JtPdAVtrhj5EysSsEFAM/QUt8DEze0ulIAZSyJGOFImUwraEw9C7sNpQ2BJb1thqTFFPsaMxpqwwRYUyFqkM1pbsXLlF161zfd5aUAZpYFzt4KqW1bqBcIFbLBCyZt00PHzwEc/XAvXeA5JzmMLS9T0i5ZqqD5EkcvlgPl/w6OEjPv7oY7705bc+9X7aRJTTyZTDwyNOzk74zvvv8vM/8wsvZWRFUfKFN7/E/Y8/wtqC4toLhuPl9/lxmbpU299E1BuHvYm6N7/3ff+S4932eIam9abs4pyjqiqcc9vfM6pNU9c1xpiXGpsv3Svpu3HgGT4CIolMfosKKXJmjcxlQ2SuXyfI97gyGR7criiVRlcWWxqcMARhWTddhieKHOQ0bc/FfM5kPKa0FkGgbVpqW2OrkuRDJtK5iOsd3vdE6ViVCqo/RRz4D2s37l7h8M4Vbt6+w4OzRxRlQluBsSXSFJRFz82bB+zKAt33pCQoREZLFH1EBo3wCRkbtIzYAXwvDOhSUo00o0qhVpHgMhtTqFwfXS1XuNDlsocCrRwxntO0M5xrianY
HqeUkqIokUJROs9SKeazM9brNUImejfUwZXCmhz5Yl7g3EPfZVifkCAVUlsmOwaRPEaBaFuCD7Rtx2q1REiNlRahDSkGgvf0fZ9TTq0pVSYTXcxnGVVBZmA2fZcXquvoh4h9k64WxtA7h/OB3juWqxXrpiPFXDtPMTcKQ0gE14J3xBhYz9ecPDvn/HROYXcRKZCCwytPihElc5mg8wEXY24iCoUn4EkDxv5yqqrzOVGKNCBICltk6n6EmAJJJZQQrM5nnD0+zogRo7l54wbPnj1j3awwo4JKiC0+v3F97h1onRvDSuERhJAxMEorKjsBEXDRk5TFFBJpEoi8OWxxu+Ryk9GGsigpyxFFNaaoJxTVGGVKhLQIDAiDMAWFMei486IXmyIigjUlxlYYU6K1RYtM5JitWh6+822W7gPWITubUVXRNS1pkBQIiFzTJ7FarZlfzOm6/lMd+EvlAKWYjqdcv3qDD+9/yGKxYDweb3HuQkju3H6NZ8+OOTs/ZVSP2Ns7+FMro2ittw70clnjcqnvk43OLDXxIgtWSlEUxbbGfXZ2Rt/3KKXY2dnZMjWllLRtu22GbghCn4y6P/ldxXAOpZCoDYWegSqfIIqAHJBSiHyv+M7z/PkMt1pgjUVaS9Caxke8CHRtj0AgpcoADCQ+uBxUJYlEIJXBJ8WsDXRNi2sbkvPIgalsC/u5rslPxIEf3Tji5usGUxSQFNpoXICmTciQSyzXrk6ZPnNUrkcCb1Uj1nHE1TNH5QcqqxR0RY1cN0gjKHZKRnsl1IblsgcC0WeMpxnYVb3rSMIPjMlICHPWq49ZLiJ91xFD3J4VKQXGWqwtAIktC0LouDg/g1VAtxlDrbSmnezkBRtTpt2mjKgw1qK0HWBxgrKu0ESUSAihctMqeLqux/uGMkKBoNB2aOYMiBIlkUh671ksly9ujBhoQ0ci0vtNSpobqyEExlVGqIwmI3Z7R4hwcnpB3wWstgzUMgD6tiH0Xa6fe0+7aplfLDk8GKOVIorhJiQSyZuHCxGXNuw0gYuRkFJmcX5iAV7+3XsPIRLVi5TXmKwFc378nPvvf8DZ+TnCan7pL/0lnj55gtCSSdfR9T2d60EJ2r6n1AqrFFrmBqfWkpTyz0JrTFHgXIcLmYSjTJFp+HHod4gXx2eMpbAlpa0oihFFOcIWI6yp0apACkMmXCsg1zgzbn5AEfkehEcIjdQFylbYosYUFV0TOD1d8vjZnJPzNcsmb6yFNriuGwgfAofAJTBmaNCFRF2PP/O+SilRliVXjq7wwf0POH72lKK4g1LV5guyu7vPznSX5WLB85PnTKe72yj4x+nIhRBbB75xzJtm42UnfTky3+DFLzv4jaPu+35L9lmtVlvY4QZrLqUkDAivy5H+5VLKpyFRMitcIoVEi8yiRshM+hJp2zORA1jB95510zGfremXq8xhqEpSUdI4EDJn1Fpm7khE5IxNqlz/z2haYlJczBravse7DhEcpUyMrKawirrWJKOYf8Z5/ok48NGkphpL5svnrNoLYii5mEVcWhDSgvuPnyFdjXz4lPoict1o/vZ0j5N4wBtPnjLRAqY1XVVzNpeE+TPMWLL32gGHdw6ISnD2bkNwnlE5zcIwXcfiosFKkEaiNRidCN2C+flDFhd7dE0k+gRDEL6h/GprKIqSg6MDqkKzWMxomwZB7k7bsqRrGpSQBOdRMmOYfddTVVnfIyToQ8CEmLnoIiFsSSmyMNT84pzj44/RiwU7B579skaYjJFVRmfmZsq4dO9fpP0xJXwMdF1LSmDMC9JOSpGyMhSmYP9on8nOLjv7KxbLP+Hx4wfUZU1hLcYY1BDBeO+HBleGpK2WC7puj6ouMuGHjGmPMS9qqSQqQkgDIzBmQasMW3vhDFzfD87uBdpgPZuTXKDvOkKMVFVJWVZ8/MGH/OZv/Es+/vhjysmIo919OtdzeO2I1XKJT5GQQFvL2jUkmSGikkjb9xRlCcrk+q+UBCHp
fAShEGSEi9IaEQXNer29oZVSjOoJdTmisjWlqbG6wqgCJQwyvYBH4gMp9cikEdZkUTJBRiJEQUgQhCLpAlWMsfWU+cljHhw7np8sWa/arbDYyi9JQyQaE7Qh4QBtNcaoQUTt+4saAQPySDGdTLl25Srvvvdt9vb2Bh2RDMIXQnDv3ht8651v8uTpI/b3D9nfy8JY348V+sOYMea78Nfe+62TBbblj1ziKbbN401pxxjDjRs3thT7Z8+e8fz5c/yQnW4i+PF4zHQ63b73ZQz65rM3v1926sZIikplBy4zUzemDAPJOiUaJXJmGGMW3ut6T7P0rGeeXgSCjcTC46OiMo5RaRFW5iAqJoTSFKUlxkTr+gyvFYaPHx0zX62ZVIYrezV7u2MOpiV1aTLwISiefUbr4yfiwD/4+H3eu3hKNC1Br3jjy69R1RKtBeu1xy8M/+KfvsNNe5sbsuYoSb507PmirhHHoMycfrLL/Nod3pt1tBeJ6aHh5leOuPHlO3Rd4v7j54zKKbdvvMF6teb977zLopkzloLaKEorMRq6pufJxx/z/GRBM3NsBTwEW4gaQqCMYTydMioMz54+5vjJY1znEEqhUqJft1hpUIkM4PeDtohP+eaTkigkrff0IS8IozXGaoxUmLbHeZfTQ1tS765R1QitNaNRFv/p+twQK6oXTDqlFUYVdMFl2QYp0EpjtEJrQZKRPnTEIElJZcSGkMzmaxbLlsloxO50ws50gtl0zAdo4GKx4P79wHhSE8KAuY6ZpRajp6xLbFmSRMK5nhByV17KTMcX4ZID944ipa0i3WQ8Jowm+K7fIhMWiwXz2YzT5yfMzs7olitEivz6f/5PufPG67x27zV829M0LW+89RZKSsqyBBJ91+GDpw8OZdWwaeaUlxSxdUXTrrN8gBw0cIjD9d6QfhR1PaEoxkhR4B006x7nl6zXDu+HbEhnAldVlYz2DignE1A6E6ZCJPU967ZhuV6xajuakAiqYLUInD5f0vuEKgqMSMgE7aKlH5adIqEHzH1oAqvGD9nOC3TU97IXTlGwt7vDP/5P/xMm4xFf+8pPc3BwtHViR4dXeDx9yDf+5I/4wz/8Pf7+f+8/4Pbt7xbH+lFs03zc1LA3a8sP5TPn3DZK3whVlWXJzs7OlmmplOK1117bIlXW6zVnZ2e88847PHjwgKZpePbsGYvFAqUUV65c4ejo6CVCz2iUG7Xfi81aFJaRqhFSotWAUw8JISRaK4wBJSIISecifdKMpjCaWB72j4ku5E2bfG9pnV+ntUSqHNUrDdbmTcqFjq7Pom995zg/mbN/54jbV/d58+5Bvn64LKzVCj5Lzeon4sD7uOJ89oTjszN+5hfucO/2EVLlaJm+5sE7Pe3FB/z2fmBvlKO+rx3PGbXv4k4b1G7NutUcL5c86h8jrmh60/Ns/pT4PORadAFvvvkme5PbPPj4IY1zaCOYlJLd6QhdKXSpUVXJ06eRx4+e47uSajRc5ASu6wcY0Qu9CRUddmistSmSfGaMNcs5ikToiwwDHKICOeCTQ/CsV54QPaPRJLP7xMCa1AV2NOHw+l3atsGUNS5C03ZoazPWVEqU1hgpEPqFml1WIexywyUmQgz0Li8QaxVaZa0Q5yB4RbuOtL1jurePTAIl0zaaOdjfxxidG0oh0jvPbNbw5MmMwu6QUmC+mHNxccp6vWRvf5e9w11G0xpbWaRQLBZLUgLvI8gKM2TvdTWiHG5UyDdU2/eIlCnvtsybwXWjWf7lX+b4wUPadUuMgUcf3uf5yXO867h64zo3796hkhrfdhRlleFdrid6h7Watm2oJ+Ms3zuAxI1WCIpBtDKnwkJFqqp4ASOUmqLeI5kxQdcoMyKaAicUq/Wak2fHNOslEDGFYlzX7B8esn/1OvVkD2tLBIlmNePZ6TMeP7nP6cUFF4s1z86WnJ0vSMlnSKkUWSWzcTjviUPEl4YKbHQRzwuupEwvoyq+n22cYW1LvvH1
36euSvb398hTELPdvfMa5+dnfPjBe8znF6R0+0+liTkajSjLchtdxxi3dWxgq4dS1zVf/vKXuXLlCkVRbB8bjUas12tOT0+3ome3bt2irmsuLi5wzm37RBuZ2f39/a0k7WdlFM4nmj6RpYjlwO70GXVGlhMW0dH6gBAGqRSjyRSB5MZr14hR0YdI6z1dCCgpqKylLC1SiSGjFVlPRShMYUEImlXP3t6Ui4tFFs1LiabvkTqzgAursPGz3fNPxIHv7VeU5ZjZYsHXf+8RZ89X7O1r9qeHqLDL6ckKqwSPuyX/SvY4aTDK8PN71wjXd1jVhufTyFkxo7gumD+XBDrU8oJ0LhhPpty6e4UvfemLFOYAF3uOHu/ycH1KIQ2TUlDuT9CjGpLm6eMLVquQcdmXuYPRs6UtRk/0DqKnKAtsWdB0Da7vWXfNoCec8K7M4lSmIMXAAAkhhIgLnuA9RhtiCDiVd2qjFCFJRvvXqFPK+iXWZnXFEDFDM1CqHCWEPm6PMsVEcgFSHKJuhXO5o09M6KIg+A15KtfzlMpRkRQKJXN2YW1JVRiCdwMJx+MTRKGZL1tOLxZUlc2biLUYrwm+Z3Exp297qqrCWM3pyTN8CPgARb3HeIAje5+hfluYWEqgBEJnOrKPnqQEsiwop2Mmh/tUkxHLizlVWRJi5I//4I8o3v4WN27dhAi/+Nf/LaqixjtH6FpwPaNRVn9EQu+7TBpCE73AeTectHw+iIEBQ5b/LCReliydZBE63NzhArQ+0XQdi/UK33VUtExtFrFShabe3aWa7oFWuL7h4bNH/PG3v8Xp6Sm98wRPlnRA4PuACyGXfZTEuUjUQ712YLmiJEpl/gFDPbeqy899fwkhOdg75O/+d/9dPvzwOzTrFcfHT7l27dYWCz0eTXjt7uvM5xd0/WdDFH9QE0IwmeSsboPV3tS9x+PxVn2wLEsODw+5e/cu9+7d25ZONuWgDTOz73vm8zlPnz7NDfCi4OrVq8QBYbRer7fPeSHcJraU/cvU+ssOfdX2NOsm8xxCZmh7l1EhwWeJ59C1dN5jCks9qhiPa1KKOTNNkiIk6pgQyoAS4P2gzSNIJme0QqqBtJZ1UaTIWeKd126yP6lBlSybyGQna+0oMzA4P8N+Ig7cao2pa452xnzz7WfMF4+Z7mom1RLRj3h4/xSloQkd9/uMQd6v97n1lS+xGo848wtmZs7KRqxVhEWJqsGlwGq1xGjDves3uXr1EFLB0eGEm9eucPb4I0gRTWB3OmV0dAPvCj7+4B1SWiOEyjT7jaXBUaZh6IH3CDKL0JYFam3o+j5jkF1PNzRTlFIkFQk+O6W4GXgwLJ4YAuuNGJZWg/CNwBYVSg/11KxvmSMwQVYeEYPmQu9e1MB9xDeOEBxeK6qqQKRBRdAFnMwOZKPbndNDgIGmLlRGoQ4lHas1RmtcSCQf8RHWbc/ZxYxpqClLRT2qs0aEVRAl0UGbHErlxRoShN7hnHtxKgdKeIgbll18weL0btuASgImB/vc++KbtKs1jz/8mIuzM4SA89k5F6ee5WxOTAJZVnztz/15RqMaYsI7R991yLogxIAPftjI3FZWIDc3c1krRk/bvoCuJQR9hG7dMlvNOZl3nC175o1j3XeMJlNGAvZES1F71N4EGHoBViOMpFs7Hjx+zHc+fMRs0ZGSRMtcl+0xNG5F7/O1lDLS9Zk+r2Te6GNMBFKWm0h501ZKIdRnY4KFyLyAXL4o+dKbb+Fdz2q14unTJ1w5ur5l8GqtOdg/4I033uT5yXPW6xV1PdpmIz+qSSmZTqfbMsnlxuRmQIMxht3dXW7dusWNGzcYjUYvwQk3yJOmaViv16zX68w0XSyYTCZMJhOKoqAoCqqqYrlccnZ2xmKxYCOatXHglx335VJK7zyp9yBUhvFK8n3iE95lGG7XOHwIICShyIqbiIQ2MktSx4gSUJWGICUpt7gyCVBk4hlsGqO5dKuEpLAFo8mI0li0VbiU
iELhY67Dfx6+7E/EgdMVFH7CtUnPw9Gc+8cnnJ+DYoXoFcuZpxxrUhdZEPiAxG8Ugds3d2mkZ9G0qNpjdizVacXe8pDJ9ZpWLnDtGm9XXN0fMzKSNF9wJUW+sHvI8c4ObnYOfU+tLYe7V0AdMJ4cM7sIeVrK5QWcBq0OLrG4yFNrNqSCpmmIweNjRIZL2sQx6wwHnbXBxaVmTe6gZyrxpjZorcWYEilVFnsf9BF8jIjgs1SAEMQ06D0MV9e7wHrZ4VxGosTpOGuwpEygaNoWpQx9H/COLM0bs6Sr9x6RIOi4lYo1RmOMRflE8g4fIm3vOZvN8dExjRXTSUlRjLCFInhB30W8j0it2T04YN22MF+R5IvlJTLGLlOXnUMISVXXuRE1kCNEFHjn2D884Kd+/mfzwhaS2ewCYqQwltZHZmcX/Na//k2WbY9PkjfffJ2qtHTtknm7YufqIbauiQMyxLtM8bfakIbmbBhkbZumzcgjhlKFVPRdx2qx5uRkyZPzNc8XDeuu4d5tTW00xkTqWrM7mTKua0prMrlKSbq+4+TZc9azFt9lsYRoFLq0BFOz5gK3wft7kTH0KWFRWVgrRHxMSJX18kWCEBPOf3YTM5f6Xjioqqq5cf0W9+9/yOnpMxaLOWGQTi2KEmMN169d59Gjjzk9PcEYS1kW/DhErqSU7O/vf2ITT1sNFK01e3t7XL16lZs3b2KtzaXIQT52Q6dv23brtLsus3S7rtt+x/F4zGg0YjQabREpq9WKGCN1XbO3t/ddm9JLeuAMekuIfN6FGAg75OxHZRqPFGmguOcGPWKQNvZhK5MsU0HXJ4xKgwyCQiqdSySILTNbkDBVAVUO0vJ/kGSkdQmSw8ghI/gM+4k48NpdY7ya4FvJz/2sZ/pM0XQtVWGZVjXtXPHtbz3n0XfO8W1gnRJ/dHHG8f/rP6Eykq/81A1u7u6yN7Hc3HmNn35zxOG1m/z+O2/z3sffRHVrpkUgffiI6o8eMF06DirL4tabvLf8A8zc095/wFpp9r/wUxxdPeD0+YIU1csXN8YsgsRGPVHhXY8xZlubWywWtF2AMmuV5Jq0J0hJ2+TSRjWqMEpnav4gLrUhGsALskIInraNuVRiDEpVCBJ93w0OPo8qk0pt76+u6zk/XyBEou9bYgBbWEJwXMzPMaVk/+CQoqiAwHyeMcUZopG2GcPmDet6hE8CF8EF6GKi84HYdDjfs2pWNF2F0ZGiMGhVslFQXHYNZW1RpaWIkcSLtD8mj3Pti2krJjuK6AMqCZRQ+KanbR1lUXDn7l26tuW9D99H74w5e/iY0pZIaRGqw63X/NZ/9a+5/8EDfv4Xfobbr91kNK2wuxO++nM/x7UbN7HGIFK+ea3Rg/BW1heRStH3LzP2Cmu5t3sdlQRfvCNpPazWPRfzFfPZObJZohYXTCcjrty+zrWbr1HWlpG1GCCEiAiway1//SuvU6uSqDSd0szQLN75Dv38Ces2a0iTwMcIEVxKGQ8fyMMDstQGKXqiy72VH8YODg5YLmc8fLjg69/4Xb7z7XfQWvPa62/w+utf5MrVm9y9+zoffvQeOzs72/rzJkL4YeviUkoODg62ioIbfZcNS3nTdNyMhluv11hrmc/nrNdr2javlY2cbNu223F8VVVtnfsmmt847s366vuei4sLDg4OtuJvn9bE1FqTtB7itIwHT2nw4CkiZciQXwKFFVSlRqp8rSUySy0InRUICVycnrK7M2I82WE82WU02SWmPBUsJeiaJc1qgWCYFBRylI7MiodPn15glCQeTWlTyWcponymAxdC3Ab+Y+AaecP6Ryml/60QYh/4vwGvAR8B/35K6fzzXNxnj5+wGq3Yv3PI/mslozcdRjt2q312qxv0PnHvvcf85/+XP+HpR3NcE7DGEXnGna99keq6YpU65FzzlZ/7Ajd3djBG84V2SmEPMIzpnnfY/+cfUT9dIPcmpHvX+Bl9jUfmKm17xvNHM5aLb3P2
+BhVXaGsLV2nM+lmsPl8jrIWk7IjUkoNAxLYzup7ocyXZ0T2bYdKUAxQvNV6SRQRG0OOqIdIaoNb3aSLm8g0L0aDEIPwU/DMZrOMehjVSGteuqnquuL6jStICYU1SAkhenrXU1SKelKxu3eAkpaTkwuePH7OcrkkDBC2jBf39H3H2WmDtRopVBbaFwrfOiKSPiSiSKgIylYUhaRZLwiuJ/QZRcKxZ7o7YjSZ4FxCXhIGa9ZrbFVirR2+c76Ru7ZlOZszP7/g/OSU6APXrl7j44cPmC0XfOlnfpq/9+/+ff53/6v/Dd/+5rdxPqJ1QWEijeh4/MGHTArD3dvX+Tt/+2+hpmMePX+O631mjcZA37WUZUUfOrQpAIEfRLom02neEAGjNQe7e1RFgQgaKTUpSZz3NPML5iePId2k3tljcnTEeHeCpcdIhegcUgpGquLWlevs7xRMy4qQYNl7nixaDh4r9FgQZNbPCDG3SDLQKbCZpETMYmCSzBCMSZA+fwl8uHEBJDHAYj7nnXfe5o+//sd8+NFHeVDIZMwX3/wSv/zLv8L1azd59vSYh/v3EUKws7M3lGJ+wM+8/PFCUFXV9nq/kJcYdLOHoGW5XDKfz3M2OJQ9NjXzDz74YFvvBraIlU1NfDwec3BwwJUrV7byshsEyiZAGo/Hn+q4Lx8nQPQ+Y72lymQaBgVPH1FJooXGSImSCSEiMeRetJECpCIM+vV7kzF1VVBVJZOdHa7cuM1iuWQ83SWlxMXpM068o+9bBGC0HMp5DucSbROQtaWLGpd+PFR6D/zPU0p/IISYAL8vhPh14H8M/LOU0n8khPiHwD8E/hef4/1Qey1p/4LlaEFpI6KD0GlWztPPn3HRnNOLlttvViiRWM16ygn85b92kze+dBstDQ8enPPtjz4iiQX/zp//t0hpQdutkTKhheLB777Nlfc0E6l5Ehu+0T6m2Z9yVF8lrOeZhSgs56crqhuS8WQX71/W8Oj6LisARrZOFl6wxzbCVVVVERMUpkBJQSTSd03We5aZ9p1ER+pguVwO9b8MC9swyLquQwBVWWZ9DinQmwHJiJw+ikQlRnmWJRsYoUQWkr5riSkSfJ7G03tHSpG2aZiLGVoVrJdrurbP4vTDrMTCFpRFiVIC166I0aNtiVaZTk7T40MYSA4aW5TU1YjpxDKdVKwuVqzmK1LvKEyFjgrlBSmJLMYzWFmOsaYeDtsPx9Zu0QjjyYS9nV1IiePnz9m/dsTd6RscHBziuw5ZWlZdS7fuKExJaS3jqsIpzfzigqdPnjCbz7h78zr7IbNYg9cURiMH8X+hzbBxZOp2Cp6irF9iBbrWM7K5rqqEgoGYZCrF+PY1itEIM55i6nEezxfbofGd8d+FMuyNd6jGmtHBYYaZzZbstoHdakqRNBaJJ0MGfUr4RCZ1Den5pm+eiNsJT47Pj0LZmEDwwfvv8C9+47/gN/6rf0brBKtVh+s7loslZ6envPOtb7IznnD92k1idHn48XiClD96cr5pSF5uYG6c7IuyodkKa00mE8bjcR7WcXHB22+/jXP5mDaMzA0kcT6fUxQFBwcH3Lt3bxsQaa0zAqeuGY1G1HXN17/+9e062xzD9hgH0TlSxv/IpLKcsNZEJfLkHNvlcYtF7lch1FBaGYbCAEJmJdDpZERZGLTKk3lCiLmZngJalxhbUlgDoUMLgEAkklIWupvUJeWoRAjJ95lVsbXPvEoppSfAk+HnhRDiW8BN4O8Bf2142v8J+Bd8TgfepZ51e4477bhxpeLk+ZroBJVNlLbjbHHB/kHNV3/2gOtXdzg/behSy50v7jPeK9GpxNo1zgWWixkXy2OIitWqp1k5nF8gjxsWq132nAfVoSeB8ZVD7i5LZFchKstKj3hQStJoilz54UZ8cXGzitoLIfcYsxIaSQ6MxFwzI4HzAS0VWmqkkNsI01ajgeCSLmk9b4YavGCdOefo23aY/CEx1lDEPA27LEt89LmTnXip
phd9wLVZ7EgMJBvnHM5lBEZaS2Kv0NrTN32WzRQSyDokMeQhFMJoGMTlY8r65cHnafApBpI0pJTZdC74DKkSESkCSuWyTVFYSALXZcKDEZcJEyVqINeooVykZcbplraAEFEyj50ydYkwGluWGGM4PV4z2d9jd3+X0/6ErllnwlLKteRmveL05IRnx8+4+9aXGU8mrFcZa6wEjKoKoTTOD1K4iYFspQkhbvsJKUS6dYMvK+pKYYvs3FMosZ0C4dCmQJYVwlqEiHl01DByDyHQZcnO0RVQAYopfTfHtY4CxVha1FLAguEzB8xTjCDTMPgDFJkIFIcmZl4wP3g4nFLk2ZOHHD96wOLiAjPew1iDd30mDHU9xyfPWVxc0LctR0dHXL1ynYOjqxzsH/7An/dJ2zAkLzM9U0rUdc3BwcFLSoIb3HhZlnRdh3OOuq6Zz+cvPb4hB4UQqOua6XTKwcHB9j7aRPAb8Ssp5VYA67L2ysaapqO9WGbpBamy+uawrgbhWFLXIjWshcL6jBJKIa8ha3QmSQ28ESUDMXlUgKiX2NEK7wOL1Rpr8ohGHwNCZMawHOYQ5KuukdJm+K/Mg1N+5BLKZRNCvAb8HPDbwNXBuZNSeiKEuPI9XvOrwK9C1vIFWKwalv2MULbsO89s3hGcwFUJHzRnZw1Xr444ujNlfx9mFx2rtmFnd0wS+YuOqxHX9g+5ulfQducoMSVFTeglzcWcYp24aHv2ZgGtJTdtpvXeuIiYviSEitM45hzFIkpC35JCfOk+sdYOiIVcsMwjvl50s6XIQwa6ziHlINa+4SqmRHAOU1QQE0lshiyIrfO+TCHuug7XO5zpCT4715QiKaSMO08qO9ftgINsznnWyzwvUkkJIgvi9MMcS5E0XWpp6en6rD0tpULKXEKJQzqq1AayJvJItBBxvUeEQIqOFCTe9TSNpGkbYrDg2iy9qQVCWqTSeBdoXJ4GX6RL0rxCEQPD8IQ85FUpg5aSZMxW20UgqHcmoBWBRNt2tM5x4/Yt1m/N+U54h8f3H9I2eVBzilmGYD6/4PT0NM9MNBZvAm3TsHI943ov38S+Gxq4G90NnVmtlxh63XpFX9ekSc6RlS0QosQUktSvETGSQk/sgOBQwWXBIZEHR6jSMjo6YnlxRusYgooWawosgriK+HnMk+xFLqGEmEgyDFBU8uzFYTuWSgzL7/NgEjY3XS69uOA4ffaU5cUFIqZMKqmqXJ4RYGLAuI6RUkxFQpw8Zf34AYvbdzncOPAfspayWduX0ScbBz4ej9nf36csy63M7GXq/IYuP5lMWK/XL9HsN8/bKBFuIu1NhL6h6W8c9QbOuNkUPlnTXy0bLk4vcD4jUXKfJDcaRYLkRR57aCVmkUliG4SQVpmlne+nQcdFRgoDSpcU8555k4gyIuQ51pS4dk2znCGjo7aaqixy0zQKpLKDlIPIw9M9n2mf24ELIcbA/x34n6WU5p+3uZFS+kfAPwK4ceNGApjN18zUkr1blpg8QkdkklkgynsuTlu6pkFMdynGkoNqxE21TyFHND7S+cDeZIerXz3k7tVdFov3KKxgOtqhne2wXH6ACIpni4bRLHE0NdxRjvD0OfWjFbos6NoRcV6yWDgW6YxmeU5yI8SliTxCSrzLqoE2WJIQ+OiHtFZgB9Gj9aqhqmtQAp8ChAzad94NkbFAGZX7ViHkxhUbR7qJUlSeTj0QOaQAgsf1niRFpn4jWLddhhkOx9j3PfPZkqKwOB1IKSsn9n3We8mzAz3rtqfvA86FnA4akyGOIRNqXOgoS0VVl3miiMuEoC3kMHj6dY9zPZNRgdsdE7o8/09rgxC5Kdg7T9t0rNctdbiUKQyyoiHGvPC1QpBnfyZS/v5KorShDy7XfgEfA73v+dJbX+bO0XUKoZmfn/P85ARrDJGI8z1Ns2axzFNrUozUVc1ysWAxnzGqDDtmKJ+k4fZMDPTobQBODJ5mMaevCtoqa6ELJVFFiUgFcd0Q
u4bYrHMNe7aiKivkVQ2mQBiDqMYomVg/eEhce5qLOdEHRgdjXPR0bU/b9LmnwLaXmZFpwwaqFFmGQCaSyE4vfE4iz+V6b9esefbsGbPFgpjAdz2j/R2UrhhpzX5w3FhfcGVUc/TmPXZWS6ZP7uOf3iN98S22aJT0wzU0L0ffl1UR9/b2MMZsj3dTXtk4+w0aZdNn2rCDL0frmwh847Qvl2kunwdjDFevXmWxWGRI6Se+R9O0zOfLTHknC00hxAA0iLRdRsMIrUkyQ02lEGgyjHDr0DelIQFKphyc2Oeo4hFJhIHDYbeNaS0SdWHY35tiTOZvFEXJdGIyEMCA8j8mHLgQwpCd9/85pfSfDn8+FkJcH6Lv68Czz/NeAFdvjrl7/TrXX5swT6fUdYuMCpGyLoa2gYuLjmuHComkMCW3Dm/QNIHj44d08zUjUXF9eoW6Urz30QeUumG3vseto6+ingVc94hOwKlwtN6hz88Qj+fcnFl29jWrdsnTsuOdSjEWHav+HIlFXG68tWukyvVlrRSVHEgBsHXeZVFh9HLrVDvvcQQSNjPBQlYbM0FjrEHEmMeWyawb4kj0WmeUSlHlmdYhIoKjXzs+/OA+SSuuXL/BdO+AFAXLQcwHIPhI23RDHb2gKC3WVlTlCD8oE4YuR/VxgElZW+DWLUVZ5hmSzhF8S9MHqnENIhGHmaLKaBSepm1pug4hBYU1HO3uoBIEF5EyoXQui3Rdm9NE7y5NP88oFC0VRkms1bi+pypL+q6hGRx7UZVMCosSOqeZMutzxxgZjUZ89d6baGCxnPPrv/7roMUwf1Ahjc466IOcrjaGejSmbdc8fvwIqTTS5LRaG4tA4Lp2S5QB8H3P2cljlG9Jvsd1DXXbUI6mCKC5mLNq58SM7cMfX1BXYwwWf7aC4hxVV7Su49HTU1JwjMqCycEhcf+AB6cLVn2AJFAbuJ/IaJNN6cqHBNtZoZuMBUzxeWCEL9vF6QmnZ2esupYoEyH0hG6NdIGx17xhNX/5aI+f39GY/py0OmN5LGkf3qTnb2bnkNLQZP3hdVIu64Dv7OxweHj4UjR9+XkbzLf3fkvWaZpmixvfOPMN2/STZZhNE3Nzf3jvuXXrFk+fPt3CDy9/7s2rR1wvVWY6C5DkIKd3jsY55l2LhzwsO0lSzOMTRUpYKxEqkkTuhXkXIWS5ZTVk2a5vcWEzjnCdAyKZhqYpPH1+ss3qtdbs7Ey589oNXrtznSQt4Ph+9nlQKAL43wPfSin9ry899J8B/yHwHw3//3981ntt7EtvvMnVL1+nlyekRWJdCUqtENKxbCPyBE6edcTb/bCrSnxYk1QE0fPw4SPiSrLYa5iWklW7pg1P2anucXX3NY5ujml/+/9N8+V9tI88XT/jg9lD6kbROsGdWcTOeq4Uip++UbHaKVh0+zg3QeoXaf/5+UWmv5oCa4dJOymByE1AawxVadFa5ZKF95ByKhw9mElBaQpSinS9o+263DgsCwpjc8Ox7Zi1jrIoKbRBkAiup1kuiDHguob1KjDZ3WWyu0dhDIvFC8eotaEoRiyXC7xbASJrWdsSFT3r9Yqu6zNpR2WMtw+C56dnGQolNbAZCRWZr5bEKPAuZvU+rUGQdcldFiLqWs9stsIQkCLPkBQyl226tiPFfLPqyxERnpgk0Qe8b/PA5xQohwgqCrBVSRcDGgFDbVoKgUIwme4w3ZnyhS+/yV88/2Wez05555tv40Rg/2CP17/wOm+++eYW8okQ7O3tMiotZydPefzkMTdubujiGZNrjGXdtNvyjQuek/kFPnlaH5isesbTlnK8JiFYrpaoUjMe14zKEjE6JKwa3Lpj0fXM+o657/AyYIlUaMaTKWY04u1vvcNvf/07rDrPQAXZ/hvkv3KmNqBHNjpgG1z3DwMImS/nhOiw2lCZkt71fKlb8loU7PSJ/V5QB0lQNTYI0mqJXj2kcL9Ja/Yo/vbfQU92sjb2D4EN/6TKYFmWXL9+
/btq4i/WsmY2m9G27VZIbmdn56XG58aBW2uZTqdbstCGCAa8VLaRUjIej9nd3WW1WtE0zSfq4AHvWlatw0VoO7dFSaWUB7mMdqcZRDCUDIMQOBe5ffNL9G3HcpmHlGw+8+692wTfsVrMWa5WVLZACo0timEua56nqZVGCDWMKMz6QgI4fnqC6zrEZB+Kne97jj9PBP6XgH8AfEMI8UfD3/6XZMf9j4UQ/xPgY+C//zneK3+otZiywlOwO56SR3m29LHByx5SBr6vuiW7kxFVWYLo0cqyP53wPoL5suE4nPPk2QjvEkJ4wmqFmF1QP7jgpNBM9wt2Dq6zuqhp3ztjZRZ8fNoj6glXvKCUkeu64cNRhVV7hK7M2CAANrMJwzBjMtdxkRLf90Syat/O7i6r9ZrZsmE1aDobrVHCsFg2mN3ciCNA2zpcm9Xv1OBIZMrN0YFHnZ127wluzWx+gTYF148Ot80dUmZKbsxYw3h3jC4FEBmNKooibyqlNkwmJXt7O/gAIQr6Hjp/QkLiQ54EpKTEDUOJu67Fu4hSlrKsscawWi/pux4QGFOgtc1TlWxGEiidb/AUE6kYYHoxUZQvsMt931AO9GAhoGlWBGkYVWNMUeTBxFLkhimbuZV54xEhoZXEuR6hJLdfv8v/6D/8B9z/6COePXvGrdu3eOutL/Plt97KPYJLCJ+qrji6coXw7Bnz+ZyEpKzytXR9l0W3NigUoEkB7T1iuaKPhpUD23iitgQStS1QUaOTzmPZyprUdciY8E3HatURw5xyMuLqG29ST6c8OX3OP////h7vPDihHZQks+MenOJAoUzD75uSSm6MveiX/KC2f3jEwZWbjB88YDFQzL8aen56soPuA9a17HYNcg5BSGLn8kCDkxP4V/+crl+RfulX0NduoqpRdt8/wGFcdtQboapyyKg+icveOOHNpJ5NlK2Uoq7r7Wsu17g3I9U2czE3JZvvloxVlEND/JMOvHWeZduzbh29c6iU0IKsXyINnY+MjGKvHOO9yesSwelsRXQ9tQZTayqVA5Hz2RoRekolkJVBB4XQKme3ViEE9J3D+0AMgRgzYisfp+Da4QF1qRASus9RNvs8KJR/xfe+bH/jMz/hU2w1WzE7maOmPbUtsGNwKbLs/BDxSqxKdJ3H7tXsjPawuiI4w3p5zLrtWHUNOimenZ/h1p7SRrrzJf3iOeLjZzxIki9qx2i3YtfsMJ5POCvPOSk6yukuIZWI6Dg2M3wtsHpElzLl9YXlOvVwHnK9V2mCyMgMpRXjUc3OzpSmdVlKNWU2n7YFIUY67zMNegDr+xDp2g49aJKAHCRKt7dz1j5u15ydnjLdP+SwqjBak0JEabHFvAIoIylHBdJ4lEqMqhJrLEZbjNYIPK7Q+CBwXiBUyEQWbUBkdqjRkuA6XJ8IPqNYhBnYYTHSNR3BBaQp0MoM+GjyIAWTIVV5VJjEDoOOI1BWL8DL3g2DegfJzhBjrsFvShhDpJkHF0uk0sQQCc5njZdCE1QiGkm9M+Hazeu8/oU3OLs4Z/9gn4PDA+rplGaIxKSUWyH+alwzaae0bUfXNZnGr/PNaAr9ogaeIr139DHSuB76hthZnDEgIjFI/PEF7fGMhVSUUmG0JgTPsl0za9d0fk1pHbv795geXeFi3fHO/TN+51szZk3ITnvTFIeNntngHLOg1abHsvk5xjQ48s+2y45xZ2ef17/wJR49fMizp8eoznE9Ba6IhJYJkQK27QgohBmmecaEXC+RD79DDA3BNYif+fPw2puo3f0fKAa/XP+u65rJZLI9vk86783UnQ2B53I921q7Jehcrqtv4IWfbJhePgcbSv7G2X+ydBMSRCFR1lCKwP5II2Ic+lMKHwNSduzv7RCCxPmIS5J10+GaGQcHI8zE4L1CSk3XtoR2xmQ6Yq8q6auEUAUn8zXWJqyWqNrS94NaacgNTBcyXvtgd8S4yOXIC6FZf8Y5/okwMU8fndH55+zciexf
mWDtoB3dCYLLDTylAutVRLHDtLqem2QucXx8wWyxZNW3iCQ4Pp8hmo660KzUiubCEs7mLNeRtFpi+iUTCwdXxvRjSbyqaQ4qnuk9FuuOD0+fc2OkMbpAtoK0bRxkB6t17jKnCM6FYbKMziPEZG5OTqcT5vMVRmeYX5YarbfOQMUB3TI4p8wsExgTB2Gp7EwSZEF/51nOl6xWDaJc07uAtjE3R6TKqn6bo5QCYzI0sLCSslBU1mJ0gUi5jt93Hc4LfFRkgEt2YBtUjIDsLENCoJAEstaTx3UdXdNBEkPz0eSUWgik1iSRJ/N4lzMVKQRWa6LIOPatRTKJSQiUSiht0GWJF2TEzfBdlNIZjmcLSBC6nsIaVG2IhSQaATrDLG/cuM71dDPrq0tFR0Ko3Ag2SmYtihSJQjKajjP7suvpmhXRGKSx+ORf6MqEyHK1xtoaK0PWbcGhhENLReoE62czlhcrWHboLkIS9FrSak8oI3aq2Llzhet3v0AykvcfnvBbf3LCe6e7mGqK6+aE4Le1bZF7lQADQkmi9GaKe46+N8zFH8RSSlTViC+/9VM8+PgBH77/Ad26x4oArqeIERkDoY/0tESZN2ehBCo6pFuhPvgWaXZMbHN5QEx+gcuKht8vK9hEy/k4Ksbj8UuR9Cdfu0Gf9H3/Xbolm+++ee3GWX8SX/5p4+E2k6k2mPAXzOPNgebGuS0khYC7R3UerhJASI3RkdX8gv2pxgVJ2wdaLxlXGhXX7IxGjEeWmAQhas4vDCIsmZYl+7sjgs9j9nQhiC4yLjU7I0vrIlrnwS8+CJZN5GzlmIwMY5NI0dJFlYvv38d+Ig78/IHj+IMF+psNb/z5Nbe+UuJ1Sx871uvIww8aSiMIV5bcOZCkgzG2juhSoihxMbLqevpWY58uGfnALF2wdz1wY2/CzaPEv/cvP6byBYyewkFkegjF6C6ja/eo96YEdmmPZzR//IBiPOGiHRTyLsUYeYfPA2211pnOm2Imh0iVNU4EVFXB3u6E2ews14l1nsUoUQhtUEWB7ztWTUPftFijUEZBVCQZ0aTNaD5iGnbjCLasQRpmszk+ROrRhMKOsNpsoV3CR2QfsEpTComJgtT0rF1Ls26Yr+fMlkt6lyMBaWqCc2gl6FrH+XpN37V0bUtZVRnXLvKY1bZz22G6G+JFjv6BAZfbu4a2XeP6Doskuqz37FOkjnBjOJdG50k3eehBbjIqW5CS2LLmlMiU/q7r0SqPfDNKk0QiKkFPoAuOzmc0zHw+xytBMR5himKIgDpi2yJSViVMJPqY0QdVVaKlzNrhfce4KghxI9oKbe95+vAM4QumRxOUVdBHYtcikVRlxd6NKcEkuq6heXLB8qLhDE+/J6mujTi8eZM3fu6X2bl6yDd+/3f4l//6Ef/1Nx3i8Ke4fWCZP3ubi4szmuG8ppTFqwZfnbMYYzBG5ywlZmmFqvrBqfQpJe7ceY2v/fTP8eTJMd/++td52JxzrW+xGgol0Ulk7LtU2GmJnozRZYFarZHtkjQ/JXz9d4h1Da/dQ+4eZZG3zzFwd+NQNxrfSqmXps5vbONsLy4uXiLLbdial6f6XHbgm3LKJx+7DDXcPGcymTAajbbveekkQfQkoBpXKFsioxjKXJJRpdmpaoS0LJqG2crR+8TOuOTKtM5N9kRGYPWRw70daj1mb1xhlGAdBJ7I1YMJKkQKJQbVUIk1grqUpCQ5m3csmi6rk1pB6B0ifjZ09CfiwKUUiGBwp4Ljt2F86Fh3LevQgXd0PuG6wJUrC87Wf8LT+Yyr5i6lucqde3t8dLzDct6xPOu4v7zgYFpgekO7rxC1RO6MMPd2kbfu4r92G3nYM9ETRnXP0eFXQUPbSFZr2Nu9xWxlmK88SRQvNd4KWxFDwHuH9yajBHxDKg1aCVrX0jRLgu8H8aqO9bphvlpyMrugbXtsWXHl8BAlMp28WSzQWlL6gtFoTK1GuWNtFD0i03d1QT3ewYQRsbT0
IcBqTYwCrSuMtVtol/eebr1mXJs8TECELTkk9J7YR1TUMNDlfeOQKAojIEpcH+mcp3MBW+bINwt6ZV2XBBRlia1GmLJGD0OVL87PaZZnGCPRWuYxckKgtEIImYc/X0r7jS0oioqQNjRnUCJjvTc3+raGKfN5iIN+Rt87qpTwzlNXNUwjs3mGxh1cO0IN5SXnM9sukTVGUvCEFBCDLOdqtcJqw6iuESkhU0KLsC2aaak4mEz54vUjrtz8CqsucHz+jGcffJtdo3j95j2q6Q7yaA95bZ/Rzzji6QydAiG0jMYjbty+hx5P+eYf/ib/+J9+nX/5nYqP5teIpub64U2uT3o++Og9nj47GXTbh/thGAfnQiA0MdeoC5sF1gRbuv/ntcu46uvXbvArf/Wv81f/1t/mt//j/yNnH7+DdT1CCqppjZkLulVLJwRBa9K4hre+irh2i+g9qV0hZESePkfsHr0kN/FZtlEN3AxzgJed9+aab/S8N3+77Kg/+ZrN99oQezYDSD6JOd84/00JZTQabcsoGzNa5/5GCrQOjk/XKJUn6cRB3yRFRUiOxkfaPtF3PdNS4pLgdN4PDOie9bpnOh4jTMm6S8SuYeUj8zZxOC7Zqw0BTeslqy6QusSBNGgRaNqW2cUFKd0hKoMtFab7NzQCT2krlcPsSc/D3xX0IjK6orh1fcIv/bnX+Z3ffR/fW9Z9z8rNcWlF336MKRdcO5wyPwl0qxXVrmFydYybBR7HOW+vTmH/KvaX38DerTj8wlXG011u+ns8ePp7PJ+9zZ2bfw0pBbZaIevErPEoO0JRIINmw1oui4IUMx2dlLBFOSykQZS963l+ckaznHPr+hWqwnBydsrFYjVQZkv6do3rGg6uXOG127eYn5/z8MHHsCGyGAtS0LQN0g8RV0xYW7A7PaTc28Eo0FKihQKZaPrmBc61sNjdHaxJeN8SvMMogdQKXRTo3mN0Fq7PWhR5kSuRx8EVZUUSGlRD6z0DUjY/R+ZjzIyzXDrJ7xFxfcP5eoaUifGk5vDwgOneLiJksa0+OPSlidqb8WFSKoxVSK2yDrV4ma0HmeEmU8LLLDyltcYaiwuOwhqq/X3CZMqH73/A2eKCm3fuMJ5McwlHSmxRgMxDlrU0CJFYLhdoY3C9IziXs6gQWK1XWecdGI8sX3zrOgj4k+/8MbPFihRbpgVcne6ws3uVtpXM2xW+6Nm/csDh/i7N+RkyJcZ7B0xv3WWxXvBbv/eH/Otv9bx/LFj3jyGdIscBM4ovsXtf3BQbPHiOyGPscTHrSgsp8jr5oSzPdd3d3eHo+jV+9+CI04/eYRoSZZbXw+7tYFyWH4izVZYcThJ95wauW8PynLbreJ7+a+6/+z5vfuGL3Lx9h7LM1/fTSikb5uTu7u5LGt+fpgwohOD09JSu67YR82VEySdft4EKblApl0k+nxbZe++3pJ+qqrZa5ABd52jaHmk0T57NGJeSg/16YFgmVq3nOx88yOdwZ8KoLjGl5WK14pvvP6KuK64c7XC0P0UZyel8zbfef0xZaPZ2xxwe7aKJPD9f8+jJOmeftmAyneb5AEGzU2do6/7eFJEyIqUohoEO7fe/uj8RB56ZRxnj2i8DT97rCMJxLVquXpvy1t2Kd77+GN9popckAn2Y03SOxXzFet7TrRM+SDyCqBU718Z0TeAJa67u7/LFe19hrb9DO4rUpaXiBvXoiMZ3GLuDiwukdUx2Rjg3ppAjIoLUya0DZ1OLSzEjRYbmF5AXflEy3dnDGkVwnrZd44PHWMP+3j7Xrl5FKUlZWMbjmsl4xHhUD5jpFwswQ6QG9ETMzrouK/aODrDT8TAt3ucoM/iXVDGMtRR6SkqOfhkIwWWp1Bhp2p7VuqNt87gxIbP+ePQZlhZ9IIWshmekwKeIknmQcgiJkESm3gO4HkVEKElIgb5b0XUt1ghitEDAR481Ok+rd474kv6EyjKuPhOZlMm9BFOoLbM0DvVKsbkhyTdk
3kgkQpiskY0AY9g/PGC5XLJeLFFSU1R1bgSKrDaY6xKR4N1QGhJ5WIYQKKlIfY+6FNglJD2Gi1UHVcW1vQPGhWKsEjoEPvj4PZ7PHcpo9vYnTMeJVjdIIRntHTA6uoaox5wdf8gfvvuER88jyyUZLmlWTEe3SClkZNPlYbsxMQxCz6SQAbaXyNKyBF6SZf0su+xQN2gNpTTf/ua3OF7MuWYsbXL45DOBKGlCaUhebxvuaTYjfvMbhFUD6zVdEpx88ID/0hse/5W/xt/8m/82N27cHHRMNidRvHQMVVVt2ZaX7TJSZFOjPj8/39b6L0MALxOALv8ML6SYN6+5PHvz8mddpukXRfGSLspsvmR2OmM8nRBQINRWvz9/juL5rGE8FoxHIdPsJbikeXS6Zs8LxpMxKQqktvRN4OHJkrI0YAsOkqAwmmUTeHCywjnP7mTEeGeXoijonMdFST2quFNOqEuDwmO0RH+OpOsnFoGHmBEPvo+sG0cMjvGoIF5MufuVN7iy8yGrdkXoFSmACys65zg+XnH8ZMXsrKNrIiw9fQ97167gFj1dC+c2Ut26QzN7TJ8cOjqUHmHLiuR2UbIiMUOowGgyYTmfIkKFD57g5NZB5knpZtA/ScTg0SbT6zfNyp3dXcZ1QXNxSt+7PAZqOubO7TvcvH4dKRLOuax5EAM7OztYa+ja7oVwfcrRsLYVMSa00ozqOo9usobGdTgf8K7H+UBZj7Y18ASElHAu5tp5n5leIQS6tqfrPM4FokhbqraSuVwQY0/yAUnEGokxgqJQ2wEQ3ueJ6D4lcC0RhYwSkieEHmUkttBoI4nR0boGoSqcD3lqvH2R/wnyMIVMkuixDFGleDG/cHPON+lzTPGlSCuXhtIgEiLYPzrIWUGIdOsWoy26KIYGax4OEUIkbJqACaQeNoDhvBlrt/XckGAVFHpccvX6DY72digVNLMZ77//Me9+9C7eR67uX+VwMsXNHUt3zu7ePnZ6gBzvsHaOBw8f8Pb9M2ZLh+87tPKM64rxuOT42Qld3281yDfXkI1OjhADECo3CzJGOA16GT+4SSmp6xGj0ZjvvPsuu0dHTEcW9ewh4ewZrBakEFlMprRojICJgEMJ8fh5HiXkHco5bPuc509W/F5V8dWv/RT7B/uM9WTL8r8ciAshthT3y43FTzrhjfTrbDZ76TmX/22+xyebnxtM+Oaxy5vCZdswNTciV6vVavvYYtVycj4nIBlPJhir876/LRNI1n2kCGKQYMhXLAnFuofaCXxgKzyV0MybgEPSuJQHj4hcCTlb9HRdj9IFSkrqwtB2EJE5Kq9qdPKQBk1x+fL3+DT7yUTgg7JfCAEXBqx165kfR87u1/y5n/9FXr/xlG++/3v0a4FbCxgU9O5/tOTBgwXnJwHnFM4HmiuB3eIGUiW6xZqz1QMulg9oe0GUEfQKmyIuLCnsDqCzYLtUmGKC0TUxDVGZFFsHHkJ4KYLonUOoTC8XA0ZXK42tamK3xhaWHWvZP7zK66+/Rl1UdF2DMYau61iv1+zs7LC7u0c3TIAPIU+OscaibUnf5+kx5TDPr2salvMFfZvHt7kYmezub89ls244PT/G9X12yiEQQxapij5PDxJK4KMjEgnJYUxBXdes130O/IRCDiprdb2RWw10nWe9dnlqDQmBH6B5CV1abFFTFhprZB4o7Hqk0gQfc7Q/0KXzNQdSprAHHzEmT1Hp+m5bG93MTNy8wHtPGAYAFLZACIgufz9hFCYE9vb36ZsO7yLL+ZKdgzyvVCpFSC8YeUJKpEqD/kvc4nmLYkDVAEJITDXirZ/6IkfXbiFRnJ2c8cEHz/j1P3lI27b83M0JNw8q9uqC5nTFbP2c3at3ieWEdUicnTzmj//kHd57fDpASwO6KNjf30NqzdOTc1brZmiYv7C0ZfXk7CjlA3r58R/SRqMRt27dIjjHl976ModG43//N3G//1uE975F6+H+1dd5KBSyWXHTdfylw12E
zjMhaZdMmzlfWM742bnjX97/kOPjx9x77R7j0eRTP3PjwDdNycuOd+NsjTF477cEm8va3pv/X25MXnbUQojvisDzeXpxojb1701JpqoqptMpZ2dn2+e4EFmtO6RcMBqN8kATDVqE7eCPLL8gQRnQlhg6RIoUOs+BTYO43QthNEUcglQXBME7Iom+C3R9wseM9rcy4ZUhJoUPEZJDJkddSAqZUPyb6sBT1hnI0Vh2NCEGLk5XPHx3TvPskF/+uX+fjx4/4Pj4AXWtee3mHZSFp0+/zWol8J3GryVu5Tk/nXNx0XLz8CaVann0+B3+4Ou/RVkpjg6v5FKAPeViec60vsNqvYRUYtRVopMQFMkFfCcI/kW655zbOpe8eCRd1yJEIqEy6cZ1RNcjkmfv4Ihl2wKJ3nVbtb+yLCnLcrtpKa1QWm0bU4k0RBeK3gd8SrSuJ4o8zb1ZZZRHTGmrSLixrus4Oz+nWa0wSlMV5SA3mxPa0ahGaWi7dZaYBUxRU493uHb9Sq6xioQPPV3f0nfNMMYr30QhSrzPkexmks8Gp+x9HuUmZHa+ZVEO+G1wvcfol5ETmcqvqOrRIMofX4hzDTeg1prOO5q2xa3WJB/YP9wjpohruzz/E0FCI3Rme+qipKVn3XQs5gtMXZGiHIIDB4PQVxgUI6VSpKhZpRxgbpr9RWl57fYVRuMdTs6X3P/4Ce+//zH3P37MzVvX+Ou/+FWujzWnT5/x4Xvf4umDC+584XVcgpOTY5btnEcPPuD3//g9luuekPJ5GY9GHB7s8PTZKcfPT7YIlE9ayqNhiIOvS2zIK4Ky+MGamJdtw0b8yte+9mJtv/4Fur/yN1i//cd89Ed/xGt/47/DgVS8+8d/wHtvf52v3r3O3ddu4pZnxI/vEz9eEs9XjIHJaJfJaAdrvz8yZtO4vIzRvgwJ3Dy+mSpvjHmJqLNx2p+kyl9+/8tIlMsQxY3z3pB8nMvZ8WQyeSkrKI1kWmsmpYJuQWgTejylMAbnA9E7bl8/RGuDNhqpFUVRMWojb967PjT5C5CGUWlY9w2HexNSCogEPiaM1aRVO6hNJmLKYlfe9cSgcH2ia3pK0WNlQOoKlfQ2U/y+5/gHXg0/DkuZfZSDrUxUSELRup7jpw/53X/9z/n7/+BX+Rt/4d/jj979Z5we3+db35rx5lf2ONgdcfqkp0kREQTtGk4+6nn/2n1S4ZnWFlkbZsuAT4pidYpLIGWgbWqCAx/nSFXRdwdEB0otiUKxmK9JzlMPvbfNotj8LGUW2nc+Y4ez9keXnV4KrLqWddvQecfz42N2JlOm052t3rH3nvPzc6bTKVLr7Xtn5EvC2IqiHuVacMwOq1032cnaIi9KJPPZ4kUT0xqm0wl1VeYaedtnJpvWGbolIEWHtYqiNkil6V2k6TxVWZOix/U9vu/p1j2z2QJbKHZ3x+zv7TDd3eHwyjWm013i8N1JsF63AzEmi2vlkVie+WzBs2fPmc0ukI3n7nDJ+94RhMyReZFlBIQa9JgHMaAY8sDfvCoExli0NlRlRRv7rRSsVBJtLT4ME96DoDAWUPTes1wuqfQE5AZeBiJGrM1InRACPiaC1BRF1mwHaJqGtx+9R/X4jAePz1i3LbvTMX/rV36Wr/3UT1Npw+/+3m/z9W9+zJPHp+xJxUHb8Oz4EcwNnV/TLc85rD239jUPLwJSa2yh6b3jo48eZamB7xdOD3DCzTkgZT36HyUChxeR6eb/qiiobr1GcfUGzRe+AqZgzxqe3d/habPm5Pd+kyvfLuklpHoHXvsq8lf+HX7p8CZ/6cZdbt++zXg0JqWXSyfbr/EJ5/vJEsiGjNb3PcvlkrIsX3LSl2GCn4QIbpqam9Fsn2yCf/J9NuzLzUCIjaMHuHfzkHt1pLI696bwFDqgVUIrQVXU7I4rep8QOKxylEYzvb7DtWt79C4PPrcqMCoM+nDEdPL6wJr2CL9mPB4jdyb81JdeY9n2SJFYLBZMD8c5
UAsggqEUCiMg+Uh0uXz5WfYTceC+dzntjR7v4hCJQYqCpmn4k6//Dj/3zl/ha6/9Ioe7V3l8+g7z/j26dsYbd/Z49rClXfZoqxkfTDk7ueDpk6fIquVgb8rIj/GpxEWDC2Xu5gqJ1hOK8gpNL3K0tlizWnqIgnaVeP7kAq08dbWXT86w+DaONsasWOGjp+/bgTXm2NudIqXmfDZnvlhQFJZJPWJvmnUMNjKWUm7kZzuMzdTbTf1z3bQoZZHGoGVmg/VtR7duBv2Ezey+xGK+eEl+NSM7BEZqVnFBShm1YcuC6B1FVWFsHhjQdY5uuaDvYbVY4V1P8IEYEn0fcT041xNj3iT8MLNxsZgRYy5/kCR9n6GAXddnyB556MR61TCfL2m7jkJdQk6IgFQJpSR6mDZugkTJoc47NPC02ggLCZI2qJToY67xozIGPQ2DZ7uuJ/Y9aZA6iEpijaXrHMxXmEFSQEtN8i1S5lFxUQiSlOAcKYRtD269bnn/Wx+CfIQ2FW/cu8Ub9+5w8+ZNfNPzO29/g48/fMbR+Aav/cyXmJ0d8/HTx7hRxfhwD20lk1HFT71xhQbD7NvnXDSOi9mCECIXs/lL9f5Ps08695QghYTrf7ga+CdtW4MGUBIhS3aOrvLgwQNms3POZhdU16+z89pfIEwm6MkuancPubMLkx1eLypUUWELu20+fy9Cz2bNX/5ul+dieu+3Mq+Xo+7N6y5H5JdhhZvo/bJC4eXXXsaBw4tIfROJb2UpgOnIUlCiZZ5/GYTKgzpk7hlpkRjbXKcGjSCipcyCVCRIiqzlDVrDGMne2BCCIUZPJGCtotKacVXROo/zPVp6UugwMkDoB8BGnTX5QyT4uCV7fT/7iTjwotplspuIDAMFYsgLNaWcNuiSd779Ha6vb9MH8MtdmsUBT51EL0uu1wdUh46ExijDhV4yFpJ6XiBDhRJTXJrQdJKzvmBlMy2WWGDKLGHati1t2+N6Cd4SOkVZ7KDVi7RQS5t1WYIghQQiw/uFB99Hks8d/Ogz4qG2U+R4iApSQdcEYlwjENuhxN55Yjzfaj1A3hi6rmVZtkiVZzfGlOjbltVsnTWhGTgH8JKGtTIl5eQQyM1PYSfbn401Q+M1E4dESmjtKBljAnRtTwg+bwYpa6Xk6fEBrQVFaZBVRZ8KYqeHTSRDzIIY8N7WDtrlCYmllBWynFDtHGLLF1T6R+++i7IWKfNUkzjoU28gxUkAMkfoiCwtMMABEEAI/rvADr3rSH44fnKdUhmN87lMpY3eTjVKvkdKgd+MMkPge4/Wkn7d5Le1Neb6l4hJUNVj9NE12vqQY1fRrzwPW0M7vkYx3UdPpqh6TLIF63qfpCcYrRDVCHNtxG294mdHC5ZdGMAvCuf8Zzrw72UbyB7A22+/zZMnT36o9/k06/ueZ8fHzBYzLi7mJFPxnix5HDSyB7nokN0FnC4+M6l//vw5kJ3n8fHxd33fjRPfjEZbLpcvOfbLz7lcXrv8b8POfPTo0XfhuuFFjX3z+s2knq7rmM1mL2+S9ZRoFG4oYyYBfiDoZVnnoZCxEfTaZBybLElcLnSIgW2tBqBDIsSAkxKJJlqRx7UFT4ieudYEMQyyVnlwiEkCGSUmaE79y+idTzPxfdO5H7PduHEj/eqv/up/Y5/3yl7ZK3tl//9gv/Zrv/b7KaU/98m/f35K1St7Za/slb2yf6Psv9EIXAjxHFjx/2vvjkHkqOI4jn9/BJNCUxijcsSgF0mTSq+wUVJqcs1ply6FpYIWFidp0ipoKygKQcQ0KqY0iJAuGuVyuXCcuWjAmCOHWGilon+LeYvLsbMWe/vezNvfB5aZfbvw3u/+e392Zm734Jdsk3bDQZx5FjjzbCiR+dGIeHDnYNYGDiDpyqhDgZo582xw5tnQpcw+hWJm1lNu4GZmPVWigb9bYM7SnHk2OPNs6Ezm7OfAzcxsd/gUiplZT7mBm5n1VLYGLumEpA1Jm5KWc82bm6Rb
kq5JWpF0JY0dkHRR0o20vb/0Oich6QNJ25LWhsZaM0p6PdV9Q9JzZVY9mZbMZyX9nGq9Imlx6LEaMh+W9JWkdUnXJb2Sxqut9ZjM3az18JegT+sG7AFuAkeAvcBV4FiOuXPfgFvAwR1jbwLLaX8ZeKP0OifMeBxYANb+LyNwLNV7HzCfXgd7SmfYpcxngddGPLeWzHPAQtrfD3yfslVb6zGZO1nrXO/AnwI2I+KHiPgTOA8sZZq7C5aAc2n/HPB8uaVMLiIuAb/uGG7LuAScj4g/IuJHYJPm9dArLZnb1JJ5KyK+S/u/A+vAISqu9ZjMbYpmztXADwE/Dd2/zfgfSp8F8IWkbyUNvrnr4YjYguYFAjxUbHXT05ax9tq/LGk1nWIZnEqoLrOkx4AngcvMSK13ZIYO1jpXAx/1LZS1/v3i0xGxAJwEXpJ0vPSCCqu59u8AjwNPAFvAW2m8qsyS7gM+AV6NiN/GPXXEWC9zj8jcyVrnauC3gcND9x8B7mSaO6uIuJO228BnNIdTdyXNAaTtdrkVTk1bxmprHxF3I+LviPgHeI//Dp2rySzpHppG9lFEfJqGq671qMxdrXWuBv4NcFTSvKS9wCngQqa5s5F0r6T9g33gWWCNJuvp9LTTwOdlVjhVbRkvAKck7ZM0DxwFvi6wvl03aGLJCzS1hkoyq/lPCe8D6xHx9tBD1da6LXNna53x6u4izRXdm8CZ0lebp5TxCM0V6avA9UFO4AHgS+BG2h4ovdYJc35Mcxj5F807kBfHZQTOpLpvACdLr38XM38IXANWaX6R5yrL/AzN6YBVYCXdFmuu9ZjMnay1P0pvZtZT/iSmmVlPuYGbmfWUG7iZWU+5gZuZ9ZQbuJlZT7mBm5n1lBu4mVlP/QuLFvKdCralOAAAAABJRU5ErkJggg==",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([8])\n"
     ]
    }
   ],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "def imshow(img):\n",
    "    \"\"\"Display a (C, H, W) image tensor, undoing Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)).\"\"\"\n",
    "    img = img / 2 + 0.5  # unnormalize: maps [-1, 1] back to [0, 1]\n",
    "    npimg = img.numpy()\n",
    "    # matplotlib expects channels-last (H, W, C)\n",
    "    plt.imshow(np.transpose(npimg, (1, 2, 0)))\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "# Grab one random batch from the training loader and show it as an image grid.\n",
    "dataiter = iter(train_loader)\n",
    "images, labels = next(dataiter)\n",
    "\n",
    "imshow(torchvision.utils.make_grid(images))\n",
    "print(labels.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2402ebdf",
   "metadata": {},
   "source": [
    "## 定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "2efe2126",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "class BasicBlock(nn.Module):\n",
    "    \"\"\"Standard ResNet basic residual block: two 3x3 convs with a skip connection.\n",
    "\n",
    "    Output channel count is ``out_channel * expansion`` (expansion = 1 here).\n",
    "    ``downsample`` projects the identity when stride or channel count changes.\n",
    "    \"\"\"\n",
    "    expansion = 1\n",
    "\n",
    "    def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs):\n",
    "        super(BasicBlock, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,\n",
    "                               kernel_size=3, stride=stride, padding=1, bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(out_channel)\n",
    "        # inplace=True for consistency with Bottleneck below and lower memory use;\n",
    "        # equivalent here since every ReLU result is rebound to `out`.\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,\n",
    "                               kernel_size=3, stride=1, padding=1, bias=False)\n",
    "        self.bn2 = nn.BatchNorm2d(out_channel)\n",
    "        self.downsample = downsample\n",
    "\n",
    "    def forward(self, x):\n",
    "        # identity path: project when spatial size or channel count changes\n",
    "        identity = x\n",
    "        if self.downsample is not None:\n",
    "            identity = self.downsample(x)\n",
    "\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv2(out)\n",
    "        out = self.bn2(out)\n",
    "\n",
    "        out += identity\n",
    "        out = self.relu(out)\n",
    "\n",
    "        return out\n",
    "\n",
    "\n",
    "class Bottleneck(nn.Module):\n",
    "    \"\"\"ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4), plus skip.\n",
    "\n",
    "    ``groups`` / ``width_per_group`` follow the ResNeXt parameterization; with the\n",
    "    defaults (groups=1, width_per_group=64) this is a plain ResNet bottleneck.\n",
    "    Output channel count is ``out_channel * expansion``.\n",
    "    \"\"\"\n",
    "\n",
    "    expansion = 4\n",
    "\n",
    "    def __init__(self, in_channel, out_channel, stride=1, downsample=None, groups=1, width_per_group=64):\n",
    "        super(Bottleneck, self).__init__()\n",
    "\n",
    "        # width of the middle 3x3 convolution (ResNeXt scaling rule)\n",
    "        width = int(out_channel * (width_per_group / 64.)) * groups\n",
    "\n",
    "        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=width,\n",
    "                               kernel_size=1, stride=1, bias=False)  # squeeze channels\n",
    "        self.bn1 = nn.BatchNorm2d(width)\n",
    "        # -----------------------------------------\n",
    "        # NOTE: stride is applied in the 3x3 conv (\"ResNet v1.5\" placement)\n",
    "        self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, groups=groups,\n",
    "                               kernel_size=3, stride=stride, bias=False, padding=1)\n",
    "        self.bn2 = nn.BatchNorm2d(width)\n",
    "        # -----------------------------------------\n",
    "        self.conv3 = nn.Conv2d(in_channels=width, out_channels=out_channel*self.expansion,\n",
    "                               kernel_size=1, stride=1, bias=False)  # unsqueeze channels\n",
    "        self.bn3 = nn.BatchNorm2d(out_channel*self.expansion)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.downsample = downsample\n",
    "\n",
    "    def forward(self, x):\n",
    "        # identity path: project when spatial size or channel count changes\n",
    "        identity = x\n",
    "        if self.downsample is not None:\n",
    "            identity = self.downsample(x)\n",
    "\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv2(out)\n",
    "        out = self.bn2(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv3(out)\n",
    "        out = self.bn3(out)\n",
    "\n",
    "        out += identity\n",
    "        out = self.relu(out)\n",
    "\n",
    "        return out\n",
    "\n",
    "\n",
    "class ResNet(nn.Module):\n",
    "    \"\"\"ResNet backbone: 7x7 stem + maxpool, four residual stages, global avg-pool, FC head.\n",
    "\n",
    "    Args:\n",
    "        block: residual block class (BasicBlock or Bottleneck); must expose ``expansion``.\n",
    "        blocks_num: four ints, number of blocks in each of layer1..layer4.\n",
    "        num_classes: size of the final classification layer.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, block, blocks_num, num_classes=1000):\n",
    "        super(ResNet, self).__init__()\n",
    "        # running input-channel count; mutated by _make_layer as stages are built\n",
    "        self.in_channel = 64\n",
    "\n",
    "        self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2, padding=3, bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(self.in_channel)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
    "        self.layer1 = self._make_layer(block, 64, blocks_num[0])\n",
    "        self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)\n",
    "        self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)\n",
    "        self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)\n",
    "        # adaptive pooling makes the head independent of input spatial size\n",
    "        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n",
    "        self.fc = nn.Linear(512 * block.expansion, num_classes)\n",
    "\n",
    "    def _make_layer(self, block, channel, block_num, stride=1):\n",
    "        # Build one stage: the first block may downsample / change channels;\n",
    "        # the remaining block_num - 1 blocks keep the shape.\n",
    "        downsample = None\n",
    "        if stride != 1 or self.in_channel != channel * block.expansion:\n",
    "            # 1x1 projection so the skip connection matches the block output shape\n",
    "            downsample = nn.Sequential(\n",
    "                nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(channel * block.expansion))\n",
    "\n",
    "        layers = []\n",
    "        layers.append(block(self.in_channel,\n",
    "                            channel,\n",
    "                            downsample=downsample,\n",
    "                            stride=stride))\n",
    "        # subsequent blocks (and the next stage) see the expanded channel count\n",
    "        self.in_channel = channel * block.expansion\n",
    "\n",
    "        for _ in range(1, block_num):\n",
    "            layers.append(block(self.in_channel, channel))\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.bn1(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.maxpool(x)\n",
    "\n",
    "        x = self.layer1(x)\n",
    "        x = self.layer2(x)\n",
    "        x = self.layer3(x)\n",
    "        x = self.layer4(x)\n",
    "\n",
    "        x = self.avgpool(x)\n",
    "        # flatten (N, C, 1, 1) -> (N, C) for the linear head\n",
    "        x = torch.flatten(x, 1)\n",
    "        x = self.fc(x)\n",
    "\n",
    "        return x\n",
    "\n",
    "\n",
    "def resnet34(num_classes=100):\n",
    "    \"\"\"Build a ResNet-34 (BasicBlock, stage config [3, 4, 6, 3]).\"\"\"\n",
    "    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)\n",
    "\n",
    "\n",
    "def smallnet(num_classes=100):\n",
    "    \"\"\"Build a ResNet-18-style network (BasicBlock, stage config [2, 2, 2, 2]).\"\"\"\n",
    "    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)\n",
    "\n",
    "\n",
    "net = resnet34()\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "ab196fc8",
   "metadata": {},
   "source": [
    "## 定义SKNet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "e56d56fd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SKNet(\n",
      "  (basic_conv): Sequential(\n",
      "    (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n",
      "    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (2): ReLU(inplace=True)\n",
      "  )\n",
      "  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
      "  (stage_1): Sequential(\n",
      "    (0): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential(\n",
      "        (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (1): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (2): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "  )\n",
      "  (stage_2): Sequential(\n",
      "    (0): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 256, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential(\n",
      "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (1): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 256, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (2): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 256, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (3): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 256, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "  )\n",
      "  (stage_3): Sequential(\n",
      "    (0): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (1): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (2): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (3): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (4): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (5): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (6): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (7): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (8): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (9): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (10): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (11): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (12): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (13): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (14): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (15): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (16): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (17): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (18): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (19): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (20): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (21): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (22): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(32, 512, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "  )\n",
      "  (stage_4): Sequential(\n",
      "    (0): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(1024, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(64, 1024, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential(\n",
      "        (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
      "        (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (1): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(2048, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(1024, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(64, 1024, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "    (2): SKUnit(\n",
      "      (conv1): Sequential(\n",
      "        (0): Conv2d(2048, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "        (2): ReLU(inplace=True)\n",
      "      )\n",
      "      (conv2_sk): SKConv(\n",
      "        (convs): ModuleList(\n",
      "          (0): Sequential(\n",
      "            (0): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "          (1): Sequential(\n",
      "            (0): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=32, bias=False)\n",
      "            (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU(inplace=True)\n",
      "          )\n",
      "        )\n",
      "        (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "        (fc): Sequential(\n",
      "          (0): Conv2d(1024, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "          (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          (2): ReLU(inplace=True)\n",
      "        )\n",
      "        (fcs): ModuleList(\n",
      "          (0-1): 2 x Conv2d(64, 1024, kernel_size=(1, 1), stride=(1, 1))\n",
      "        )\n",
      "        (softmax): Softmax(dim=1)\n",
      "      )\n",
      "      (conv3): Sequential(\n",
      "        (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "        (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "      )\n",
      "      (shortcut): Sequential()\n",
      "      (relu): ReLU(inplace=True)\n",
      "    )\n",
      "  )\n",
      "  (gap): AdaptiveAvgPool2d(output_size=(1, 1))\n",
      "  (classifier): Linear(in_features=2048, out_features=1000, bias=True)\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "#sknet\n",
    "class SKConv(nn.Module):\n",
    "    def __init__(self, features, M=2, G=32, r=16, stride=1 ,L=32):\n",
    "        \"\"\" Constructor\n",
    "        Args:\n",
    "            features: input channel dimensionality.\n",
    "            M: the number of branchs.\n",
    "            G: num of convolution groups.\n",
    "            r: the ratio for compute d, the length of z.\n",
    "            stride: stride, default 1.\n",
    "            L: the minimum dim of the vector z in paper, default 32.\n",
    "        \"\"\"\n",
    "        super(SKConv, self).__init__()\n",
    "        d = max(int(features/r), L)\n",
    "        self.M = M\n",
    "        self.features = features\n",
    "        self.convs = nn.ModuleList([])\n",
    "        for i in range(M):\n",
    "            self.convs.append(nn.Sequential(\n",
    "                nn.Conv2d(features, features, kernel_size=3, stride=stride, padding=1+i, dilation=1+i, groups=G, bias=False),\n",
    "                nn.BatchNorm2d(features),\n",
    "                nn.ReLU(inplace=True)\n",
    "            ))\n",
    "        self.gap = nn.AdaptiveAvgPool2d((1,1))\n",
    "        self.fc = nn.Sequential(nn.Conv2d(features, d, kernel_size=1, stride=1, bias=False),\n",
    "                                nn.BatchNorm2d(d),\n",
    "                                nn.ReLU(inplace=True))\n",
    "        self.fcs = nn.ModuleList([])\n",
    "        for i in range(M):\n",
    "            self.fcs.append(\n",
    "                 nn.Conv2d(d, features, kernel_size=1, stride=1)\n",
    "            )\n",
    "        self.softmax = nn.Softmax(dim=1)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        \n",
    "        batch_size = x.shape[0]\n",
    "        \n",
    "        feats = [conv(x) for conv in self.convs]      \n",
    "        feats = torch.cat(feats, dim=1)\n",
    "        feats = feats.view(batch_size, self.M, self.features, feats.shape[2], feats.shape[3])\n",
    "        \n",
    "        feats_U = torch.sum(feats, dim=1)\n",
    "        feats_S = self.gap(feats_U)\n",
    "        feats_Z = self.fc(feats_S)\n",
    "\n",
    "        attention_vectors = [fc(feats_Z) for fc in self.fcs]\n",
    "        attention_vectors = torch.cat(attention_vectors, dim=1)\n",
    "        attention_vectors = attention_vectors.view(batch_size, self.M, self.features, 1, 1)\n",
    "        attention_vectors = self.softmax(attention_vectors)\n",
    "        \n",
    "        feats_V = torch.sum(feats*attention_vectors, dim=1)\n",
    "        \n",
    "        return feats_V\n",
    "\n",
    "\n",
    "class SKUnit(nn.Module):\n",
    "    def __init__(self, in_features, mid_features, out_features, M=2, G=32, r=16, stride=1, L=32):\n",
    "        \"\"\" Residual bottleneck unit built around an SKConv.\n",
    "        Args:\n",
    "            in_features: input channel dimensionality.\n",
    "            mid_features: channel dim of the bottleneck (the SKConv that may carry the stride).\n",
    "            out_features: output channel dimensionality.\n",
    "            M: the number of branches.\n",
    "            G: num of convolution groups.\n",
    "            r: the ratio for computing d, the length of z.\n",
    "            stride: stride of the SKConv, default 1.\n",
    "            L: the minimum dim of the vector z in paper, default 32.\n",
    "        \"\"\"\n",
    "        super(SKUnit, self).__init__()\n",
    "        \n",
    "        # 1x1 reduce.\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv2d(in_features, mid_features, 1, stride=1, bias=False),\n",
    "            nn.BatchNorm2d(mid_features),\n",
    "            nn.ReLU(inplace=True)\n",
    "            )\n",
    "        \n",
    "        # Selective-kernel conv; the only place the stride is applied.\n",
    "        self.conv2_sk = SKConv(mid_features, M=M, G=G, r=r, stride=stride, L=L)\n",
    "        \n",
    "        # 1x1 expand (no ReLU before the residual add).\n",
    "        self.conv3 = nn.Sequential(\n",
    "            nn.Conv2d(mid_features, out_features, 1, stride=1, bias=False),\n",
    "            nn.BatchNorm2d(out_features)\n",
    "            )\n",
    "        \n",
    "        # An identity shortcut is only valid when neither the channel count nor\n",
    "        # the spatial size changes. The original checked channels alone, which\n",
    "        # would crash on a stride > 1 unit whose channels happen to match.\n",
    "        if in_features == out_features and stride == 1:\n",
    "            self.shortcut = nn.Sequential()\n",
    "        else: # project (and/or downsample) the input so it can be added to out\n",
    "            self.shortcut = nn.Sequential(\n",
    "                nn.Conv2d(in_features, out_features, 1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(out_features)\n",
    "            )\n",
    "        \n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "    \n",
    "    def forward(self, x):\n",
    "        \"\"\"Bottleneck forward: reduce -> SKConv -> expand, plus shortcut.\"\"\"\n",
    "        residual = x\n",
    "        \n",
    "        out = self.conv1(x)\n",
    "        out = self.conv2_sk(out)\n",
    "        out = self.conv3(out)\n",
    "        \n",
    "        return self.relu(out + self.shortcut(residual))\n",
    "\n",
    "class SKNet(nn.Module):\n",
    "    def __init__(self, class_num, nums_block_list = [3, 4, 6, 3], strides_list = [1, 2, 2, 2]):\n",
    "        \"\"\"ResNet-style backbone whose bottleneck units use SKConv.\n",
    "\n",
    "        Args:\n",
    "            class_num: number of output classes.\n",
    "            nums_block_list: number of SKUnits in each of the 4 stages.\n",
    "            strides_list: stride of the first unit of each stage.\n",
    "        \"\"\"\n",
    "        super(SKNet, self).__init__()\n",
    "        # Stem: 7x7/2 conv, as in ResNet.\n",
    "        self.basic_conv = nn.Sequential(\n",
    "            nn.Conv2d(3, 64, 7, 2, 3, bias=False),\n",
    "            nn.BatchNorm2d(64),\n",
    "            nn.ReLU(inplace=True),\n",
    "        )\n",
    "        \n",
    "        self.maxpool = nn.MaxPool2d(3,2,1)\n",
    "        \n",
    "        self.stage_1 = self._make_layer(64, 128, 256, nums_block=nums_block_list[0], stride=strides_list[0])\n",
    "        self.stage_2 = self._make_layer(256, 256, 512, nums_block=nums_block_list[1], stride=strides_list[1])\n",
    "        self.stage_3 = self._make_layer(512, 512, 1024, nums_block=nums_block_list[2], stride=strides_list[2])\n",
    "        self.stage_4 = self._make_layer(1024, 1024, 2048, nums_block=nums_block_list[3], stride=strides_list[3])\n",
    "     \n",
    "        self.gap = nn.AdaptiveAvgPool2d((1, 1))\n",
    "        self.classifier = nn.Linear(2048, class_num)\n",
    "        \n",
    "    def _make_layer(self, in_feats, mid_feats, out_feats, nums_block, stride=1):\n",
    "        \"\"\"One stage: a (possibly strided) unit, then nums_block-1 identity-shape units.\"\"\"\n",
    "        layers=[SKUnit(in_feats, mid_feats, out_feats, stride=stride)]\n",
    "        for _ in range(1,nums_block):\n",
    "            layers.append(SKUnit(out_feats, mid_feats, out_feats))\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        fea = self.basic_conv(x)\n",
    "        fea = self.maxpool(fea)\n",
    "        fea = self.stage_1(fea)\n",
    "        fea = self.stage_2(fea)\n",
    "        fea = self.stage_3(fea)\n",
    "        fea = self.stage_4(fea)\n",
    "        fea = self.gap(fea)\n",
    "        # Flatten only channel/spatial dims. The original torch.squeeze(fea)\n",
    "        # also dropped the batch dim when batch_size == 1, which broke the\n",
    "        # classifier/loss shapes; flatten(fea, 1) is identical for batch > 1.\n",
    "        fea = torch.flatten(fea, 1)\n",
    "        fea = self.classifier(fea)\n",
    "        return fea\n",
    "\n",
    "def SKNet26(nums_class=1000):\n",
    "    \"\"\"Depth-26 SKNet: two SK units per stage.\"\"\"\n",
    "    return SKNet(nums_class, [2, 2, 2, 2])\n",
    "\n",
    "def SKNet50(nums_class=1000):\n",
    "    \"\"\"Depth-50 SKNet with the ResNet-50 stage layout.\"\"\"\n",
    "    return SKNet(nums_class, [3, 4, 6, 3])\n",
    "\n",
    "def SKNet101(nums_class=1000):\n",
    "    \"\"\"Depth-101 SKNet with the ResNet-101 stage layout.\"\"\"\n",
    "    return SKNet(nums_class, [3, 4, 23, 3])\n",
    "\n",
    "sknet = SKNet101()\n",
    "print(sknet)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a1b72b33",
   "metadata": {},
   "source": [
    "## 定义优化器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "af29e5e8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1250\n"
     ]
    }
   ],
   "source": [
    "# Loss and optimizer setup.\n",
    "import torch.optim as optim\n",
    "\n",
    "# Multi-class classification loss; expects raw logits from the network.\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "# NOTE(review): this optimizes `net`, which is not defined anywhere in this\n",
    "# notebook chunk (the cell above builds `sknet`) -- confirm `net` comes from an\n",
    "# earlier cell and is the model that is actually meant to be trained.\n",
    "optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)\n",
    "# Number of mini-batches per epoch.\n",
    "print(len(train_loader))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2bc43de9",
   "metadata": {},
   "source": [
    "## 训练网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "df16afc4",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'top1Correct' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32mC:\\WINDOWS\\TEMP/ipykernel_29476/2025183058.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     31\u001b[0m             \u001b[0mtop5Predicted\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtopk\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mk\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m5\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlargest\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     32\u001b[0m             \u001b[0mtotal\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mlabels\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 33\u001b[1;33m             \u001b[0mtop1Correct\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mtop1Predicted\u001b[0m \u001b[1;33m==\u001b[0m \u001b[0mlabels\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msum\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     34\u001b[0m             \u001b[0mlabel_resize\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mlabels\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mview\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mexpand_as\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtop5Predicted\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     35\u001b[0m             \u001b[0mtop5Correct\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0meq\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtop5Predicted\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlabel_resize\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mview\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msum\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'top1Correct' is not defined"
     ]
    }
   ],
   "source": [
    "# Training loop: SGD over train_loader, with a test-set evaluation every\n",
    "# 100 mini-batches. Loss / top-1 / top-5 histories are kept for plotting.\n",
    "lossList = []\n",
    "top1AccuracyList = []   # top-1 accuracy history (one entry per evaluation)\n",
    "top5AccuracyList = []   # top-5 accuracy history\n",
    "max_epoch = 2\n",
    "for epoch in range(max_epoch):  # loop over the dataset multiple times\n",
    "\n",
    "    running_loss = 0.0\n",
    "    for i, data in enumerate(train_loader, 0):\n",
    "        # get the inputs; data is a list of [inputs, labels]\n",
    "        inputs, labels = data\n",
    "\n",
    "        # zero the parameter gradients\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        # forward + backward + optimize\n",
    "        outputs = net(inputs)\n",
    "        loss = criterion(outputs, labels)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        # print statistics\n",
    "        running_loss += loss.item()\n",
    "        # Evaluate every 100 mini-batches. (The original additionally ran a\n",
    "        # full test-set pass after *every* batch, with uninitialized counters\n",
    "        # -- the NameError in this cell's output -- and without no_grad;\n",
    "        # that stray loop is removed here.)\n",
    "        if i % 100 == 99:    # print every 100 mini-batches\n",
    "            total = 0\n",
    "            top1Correct = 0\n",
    "            top5Correct = 0\n",
    "            # eval mode: use BN running stats and keep test data from\n",
    "            # polluting them during evaluation\n",
    "            net.eval()\n",
    "            # since we're not training, we don't need to calculate the gradients for our outputs\n",
    "            with torch.no_grad():\n",
    "                for data in test_loader:\n",
    "                    images, labels = data\n",
    "                    # calculate outputs by running images through the network\n",
    "                    outputs = net(images)\n",
    "                    # top-1: the class with the highest logit\n",
    "                    _, top1Predicted = torch.max(outputs.data, 1)\n",
    "                    # top-5: use the *indices* of the 5 largest logits.\n",
    "                    # The original took [0] (the logit values), so comparing\n",
    "                    # them to integer labels never counted a hit.\n",
    "                    top5Predicted = torch.topk(outputs.data, k=5, dim=1, largest=True)[1]\n",
    "                    total += labels.size(0)\n",
    "                    top1Correct += (top1Predicted == labels).sum().item()\n",
    "                    # broadcast labels against the 5 candidate columns\n",
    "                    label_resize = labels.view(-1, 1).expand_as(top5Predicted)\n",
    "                    top5Correct += torch.eq(top5Predicted, label_resize).view(-1).sum().float().item()\n",
    "            net.train()  # back to training mode\n",
    "            print(f'[{epoch + 1}, {i + 1:5d}] top1Accuracy: {100 * top1Correct / total} %')\n",
    "            print(f'[{epoch + 1}, {i + 1:5d}] top5Accuracy: {100 * top5Correct / total} %')\n",
    "            # average loss over the 100 batches since the last report\n",
    "            # (the original divided by 2000 while accumulating only 100)\n",
    "            print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 100:.3f}')\n",
    "            top1AccuracyList.append(top1Correct / total)\n",
    "            top5AccuracyList.append(top5Correct / total)\n",
    "            lossList.append(running_loss / 100)\n",
    "            running_loss = 0.0\n",
    "\n",
    "print('Finished Training')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1d660104",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the validation curves recorded during training.\n",
    "# x-axis: the k-th evaluation (one every 100 mini-batches).\n",
    "# Use len(...) + 1 so x and y have equal length -- the original\n",
    "# np.arange(1, len(...)) was one element short, so plt.plot raised a\n",
    "# shape-mismatch ValueError whenever the lists were non-empty.\n",
    "plt.figure(figsize=(5,3))\n",
    "plt.plot(np.arange(1, len(lossList) + 1), lossList)\n",
    "plt.title('validation loss')\n",
    "\n",
    "plt.figure(figsize=(5,3))\n",
    "plt.plot(np.arange(1, len(top1AccuracyList) + 1), top1AccuracyList)\n",
    "plt.title('validation top1 accuracy')\n",
    "\n",
    "plt.figure(figsize=(5,3))\n",
    "plt.plot(np.arange(1, len(top5AccuracyList) + 1), top5AccuracyList)\n",
    "plt.title('validation top5 accuracy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c507f975",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the trained model weights (state_dict only, not the whole module;\n",
    "# reload later with net.load_state_dict(torch.load(PATH)))\n",
    "PATH = './cifar_ResNet.pth'\n",
    "torch.save(net.state_dict(), PATH)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8529d621",
   "metadata": {},
   "source": [
    "## 在测试集上测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b18091e4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Final evaluation on the test set\n",
    "top1Correct = 0\n",
    "top5Correct = 0\n",
    "total = 0\n",
    "# evaluation mode: use running BN stats, disable dropout\n",
    "net.eval()\n",
    "# since we're not training, we don't need to calculate the gradients for our outputs\n",
    "with torch.no_grad():\n",
    "    for data in test_loader:\n",
    "        images, labels = data\n",
    "        # calculate outputs by running images through the network\n",
    "        outputs = net(images)\n",
    "        # top-1: the class with the highest score is the prediction\n",
    "        _, predicted = torch.max(outputs.data, 1)\n",
    "        # top-5: take the class INDICES of the 5 highest scores ([1]);\n",
    "        # [0] would be the scores themselves, which can never equal a label\n",
    "        top5Predicted = torch.topk(outputs.data, k=5, dim=1, largest=True)[1]\n",
    "        total += labels.size(0)\n",
    "        top1Correct += (predicted == labels).sum().item()\n",
    "        # broadcast labels to (N, 5) and count a hit if any of the 5 matches\n",
    "        label_resize = labels.view(-1, 1).expand_as(top5Predicted)\n",
    "        top5Correct += torch.eq(top5Predicted, label_resize).view(-1).sum().float().item()\n",
    "\n",
    "# true division (/) instead of floor (//): top5Correct is a float, so floor\n",
    "# division silently truncated the fractional accuracy\n",
    "print(f'Top1 Accuracy of the network on the  test images: {100 * top1Correct / total:.2f} %')\n",
    "print(f'Top5 Accuracy of the network on the  test images: {100 * top5Correct / total:.2f} %')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
