{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<font color=\"red\">注</font>: 使用 tensorboard 可视化需要安装 tensorflow (TensorBoard依赖于tensorflow库，可以任意安装tensorflow的gpu/cpu版本)\n",
    "\n",
    "```shell\n",
    "pip install tensorflow-cpu\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:44.804492Z",
     "iopub.status.busy": "2025-01-21T08:17:44.804328Z",
     "iopub.status.idle": "2025-01-21T08:17:46.958945Z",
     "shell.execute_reply": "2025-01-21T08:17:46.958384Z",
     "shell.execute_reply.started": "2025-01-21T08:17:44.804473Z"
    },
    "ExecuteTime": {
     "end_time": "2025-03-02T07:02:39.344361Z",
     "start_time": "2025-03-02T07:02:32.910422Z"
    }
   },
   "source": [
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "import numpy as np\n",
    "import sklearn\n",
    "import pandas as pd\n",
    "import os\n",
    "import sys\n",
    "import time\n",
    "from tqdm.auto import tqdm\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "print(sys.version_info)\n",
    "for module in mpl, np, pd, sklearn, torch:\n",
    "    print(module.__name__, module.__version__)\n",
    "    \n",
    "device = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\n",
    "print(device)\n",
    "\n",
    "seed = 42\n"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sys.version_info(major=3, minor=12, micro=3, releaselevel='final', serial=0)\n",
      "matplotlib 3.10.0\n",
      "numpy 1.26.4\n",
      "pandas 2.2.3\n",
      "sklearn 1.6.1\n",
      "torch 2.6.0+cu118\n",
      "cuda:0\n"
     ]
    }
   ],
   "execution_count": 1
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据准备\n",
    "\n",
    "https://www.kaggle.com/competitions/cifar-10/data\n",
    "\n",
    "```shell\n",
    "$ tree -L 1 cifar-10                                    \n",
    "cifar-10\n",
    "├── sampleSubmission.csv\n",
    "├── test\n",
    "├── train\n",
    "└── trainLabels.csv\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:46.960544Z",
     "iopub.status.busy": "2025-01-21T08:17:46.960147Z",
     "iopub.status.idle": "2025-01-21T08:17:49.220895Z",
     "shell.execute_reply": "2025-01-21T08:17:49.220214Z",
     "shell.execute_reply.started": "2025-01-21T08:17:46.960522Z"
    },
    "tags": [],
    "ExecuteTime": {
     "end_time": "2025-03-02T07:02:39.512976Z",
     "start_time": "2025-03-02T07:02:39.345670Z"
    }
   },
   "source": [
    "from pathlib import Path\n",
    "\n",
    "DATA_DIR = Path(\".\")\n",
    "DATA_DIR1 =Path(\"competitions/cifar-10/\")\n",
    "train_lables_file = DATA_DIR / \"trainLabels.csv\"\n",
    "test_csv_file = DATA_DIR / \"sampleSubmission.csv\" #测试集模板csv文件\n",
    "train_folder = DATA_DIR1 / \"train\"\n",
    "test_folder = DATA_DIR1 / \"test\"\n",
    "\n",
    "#所有的类别\n",
    "class_names = [\n",
    "    'airplane',\n",
    "    'automobile',\n",
    "    'bird',\n",
    "    'cat',\n",
    "    'deer',\n",
    "    'dog',\n",
    "    'frog',\n",
    "    'horse',\n",
    "    'ship',\n",
    "    'truck',\n",
    "]\n",
    "\n",
    "def parse_csv_file(filepath, folder):\n",
    "    \"\"\"Parses csv files into (filename(path), label) format\"\"\"\n",
    "    results = []\n",
    "    #读取所有行\n",
    "    with open(filepath, 'r') as f:\n",
    "#         lines = f.readlines()  为什么加[1:]，可以试这个\n",
    "        #第一行不需要，因为第一行是标签\n",
    "        lines = f.readlines()[1:] \n",
    "    for line in lines:#依次去取每一行\n",
    "        image_id, label_str = line.strip('\\n').split(',')\n",
    "        image_full_path = folder / f\"{image_id}.png\"\n",
    "        results.append((image_full_path, label_str)) #得到对应图片的路径和分类\n",
    "    return results\n",
    "\n",
    "#解析对应的文件夹\n",
    "train_labels_info = parse_csv_file(train_lables_file, train_folder)\n",
    "test_csv_info = parse_csv_file(test_csv_file, test_folder)\n",
    "#打印\n",
    "import pprint\n",
    "pprint.pprint(train_labels_info[0:5])\n",
    "pprint.pprint(test_csv_info[0:5])\n",
    "print(len(train_labels_info), len(test_csv_info))"
   ],
   "outputs": [
    {
     "ename": "FileNotFoundError",
     "evalue": "[Errno 2] No such file or directory: 'trainLabels.csv'",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mFileNotFoundError\u001B[0m                         Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[2], line 39\u001B[0m\n\u001B[0;32m     36\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m results\n\u001B[0;32m     38\u001B[0m \u001B[38;5;66;03m#解析对应的文件夹\u001B[39;00m\n\u001B[1;32m---> 39\u001B[0m train_labels_info \u001B[38;5;241m=\u001B[39m \u001B[43mparse_csv_file\u001B[49m\u001B[43m(\u001B[49m\u001B[43mtrain_lables_file\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mtrain_folder\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     40\u001B[0m test_csv_info \u001B[38;5;241m=\u001B[39m parse_csv_file(test_csv_file, test_folder)\n\u001B[0;32m     41\u001B[0m \u001B[38;5;66;03m#打印\u001B[39;00m\n",
      "Cell \u001B[1;32mIn[2], line 28\u001B[0m, in \u001B[0;36mparse_csv_file\u001B[1;34m(filepath, folder)\u001B[0m\n\u001B[0;32m     26\u001B[0m     results \u001B[38;5;241m=\u001B[39m []\n\u001B[0;32m     27\u001B[0m     \u001B[38;5;66;03m#读取所有行\u001B[39;00m\n\u001B[1;32m---> 28\u001B[0m     \u001B[38;5;28;01mwith\u001B[39;00m \u001B[38;5;28;43mopen\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43mfilepath\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mr\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m)\u001B[49m \u001B[38;5;28;01mas\u001B[39;00m f:\n\u001B[0;32m     29\u001B[0m \u001B[38;5;66;03m#         lines = f.readlines()  为什么加[1:]，可以试这个\u001B[39;00m\n\u001B[0;32m     30\u001B[0m         \u001B[38;5;66;03m#第一行不需要，因为第一行是标签\u001B[39;00m\n\u001B[0;32m     31\u001B[0m         lines \u001B[38;5;241m=\u001B[39m f\u001B[38;5;241m.\u001B[39mreadlines()[\u001B[38;5;241m1\u001B[39m:] \n\u001B[0;32m     32\u001B[0m     \u001B[38;5;28;01mfor\u001B[39;00m line \u001B[38;5;129;01min\u001B[39;00m lines:\u001B[38;5;66;03m#依次去取每一行\u001B[39;00m\n",
      "File \u001B[1;32m~\\venv\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:324\u001B[0m, in \u001B[0;36m_modified_open\u001B[1;34m(file, *args, **kwargs)\u001B[0m\n\u001B[0;32m    317\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m file \u001B[38;5;129;01min\u001B[39;00m {\u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m2\u001B[39m}:\n\u001B[0;32m    318\u001B[0m     \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mValueError\u001B[39;00m(\n\u001B[0;32m    319\u001B[0m         \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mIPython won\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mt let you open fd=\u001B[39m\u001B[38;5;132;01m{\u001B[39;00mfile\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m by default \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    320\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mas it is likely to crash IPython. If you know what you are doing, \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    321\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124myou can use builtins\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m open.\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    322\u001B[0m     )\n\u001B[1;32m--> 324\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mio_open\u001B[49m\u001B[43m(\u001B[49m\u001B[43mfile\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[1;31mFileNotFoundError\u001B[0m: [Errno 2] No such file or directory: 'trainLabels.csv'"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:49.221678Z",
     "iopub.status.busy": "2025-01-21T08:17:49.221499Z",
     "iopub.status.idle": "2025-01-21T08:17:49.278733Z",
     "shell.execute_reply": "2025-01-21T08:17:49.278233Z",
     "shell.execute_reply.started": "2025-01-21T08:17:49.221658Z"
    }
   },
   "source": [
    "# train_df = pd.DataFrame(train_labels_info)\n",
    "train_df = pd.DataFrame(train_labels_info[0:45000])\n",
    "valid_df = pd.DataFrame(train_labels_info[45000:])\n",
    "test_df = pd.DataFrame(test_csv_info)\n",
    "\n",
    "train_df.columns = ['filepath', 'class']\n",
    "valid_df.columns = ['filepath', 'class']\n",
    "test_df.columns = ['filepath', 'class']\n",
    "\n",
    "print(train_df.head())\n",
    "print(valid_df.head())\n",
    "print(test_df.head())"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:49.279531Z",
     "iopub.status.busy": "2025-01-21T08:17:49.279337Z",
     "iopub.status.idle": "2025-01-21T08:17:50.075246Z",
     "shell.execute_reply": "2025-01-21T08:17:50.074702Z",
     "shell.execute_reply.started": "2025-01-21T08:17:49.279511Z"
    }
   },
   "source": [
    "from PIL import Image\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torchvision import transforms\n",
    "\n",
    "class Cifar10Dataset(Dataset):\n",
    "    df_map = {\n",
    "        \"train\": train_df,\n",
    "        \"eval\": valid_df,\n",
    "        \"test\": test_df\n",
    "    }\n",
    "    label_to_idx = {label: idx for idx, label in enumerate(class_names)}\n",
    "    idx_to_label = {idx: label for idx, label in enumerate(class_names)}\n",
    "    def __init__(self, mode, transform=None):\n",
    "        self.df = self.df_map.get(mode, None)\n",
    "        if self.df is None:\n",
    "            raise ValueError(\"mode should be one of train, val, test, but got {}\".format(mode))\n",
    "\n",
    "        self.transform = transform\n",
    "        \n",
    "    def __getitem__(self, index):\n",
    "        img_path, label = self.df.iloc[index]\n",
    "        img = Image.open(img_path).convert('RGB')\n",
    "        # # img 转换为 channel first\n",
    "        # img = img.transpose((2, 0, 1))\n",
    "        # transform\n",
    "        img = self.transform(img)\n",
    "        # label 转换为 idx\n",
    "        label = self.label_to_idx[label]\n",
    "        return img, label\n",
    "    \n",
    "    def __len__(self):\n",
    "        return self.df.shape[0]\n",
    "    \n",
    "IMAGE_SIZE = 32\n",
    "mean, std = [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261]\n",
    "\n",
    "transforms_train = transforms.Compose([\n",
    "        # resize\n",
    "        transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),\n",
    "        # random rotation 40\n",
    "        transforms.RandomRotation(40),\n",
    "        # horizaontal flip\n",
    "        transforms.RandomHorizontalFlip(),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean, std)\n",
    "    ])\n",
    "\n",
    "transforms_eval = transforms.Compose([\n",
    "        # resize\n",
    "        transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean, std)\n",
    "    ])\n",
    "\n",
    "train_ds = Cifar10Dataset(\"train\", transforms_train)\n",
    "eval_ds = Cifar10Dataset(\"eval\", transforms_eval) "
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:50.076347Z",
     "iopub.status.busy": "2025-01-21T08:17:50.075918Z",
     "iopub.status.idle": "2025-01-21T08:17:50.079491Z",
     "shell.execute_reply": "2025-01-21T08:17:50.079002Z",
     "shell.execute_reply.started": "2025-01-21T08:17:50.076324Z"
    }
   },
   "source": [
    "batch_size = 64\n",
    "train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=4)   \n",
    "eval_dl = DataLoader(eval_ds, batch_size=batch_size, shuffle=False, num_workers=4)"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:50.080075Z",
     "iopub.status.busy": "2025-01-21T08:17:50.079910Z",
     "iopub.status.idle": "2025-01-21T08:17:50.082683Z",
     "shell.execute_reply": "2025-01-21T08:17:50.082228Z",
     "shell.execute_reply.started": "2025-01-21T08:17:50.080057Z"
    }
   },
   "source": [
    "# 遍历train_ds得到每张图片，计算每个通道的均值和方差\n",
    "# def cal_mean_std(ds):\n",
    "#     mean = 0.\n",
    "#     std = 0.\n",
    "#     for img, _ in ds:\n",
    "#         mean += img.mean(dim=(1, 2))\n",
    "#         std += img.std(dim=(1, 2))\n",
    "#     mean /= len(ds)\n",
    "#     std /= len(ds)\n",
    "#     return mean, std\n",
    "#\n",
    "# # 经过 normalize 后 均值为0，方差为1\n",
    "# print(cal_mean_std(train_ds))"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 定义模型"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:50.084733Z",
     "iopub.status.busy": "2025-01-21T08:17:50.084474Z",
     "iopub.status.idle": "2025-01-21T08:17:50.091206Z",
     "shell.execute_reply": "2025-01-21T08:17:50.090722Z",
     "shell.execute_reply.started": "2025-01-21T08:17:50.084714Z"
    }
   },
   "source": [
    "class InceptionBolck(nn.Module):\n",
    "    #output_channel_for_each_path: 1*1卷积输出通道数，3*3卷积输出通道数，5*5卷积输出通道数，池化层输出通道数\n",
    "    def __init__(self, input_channels: int, output_channel_for_each_path: list[int]):\n",
    "        super(InceptionBolck, self).__init__()\n",
    "        self.conv1_1 = nn.Conv2d(\n",
    "            in_channels=input_channels, \n",
    "            out_channels=output_channel_for_each_path[0],\n",
    "            kernel_size=1, \n",
    "            padding=\"same\"\n",
    "            )\n",
    "        \n",
    "        self.conv3_3 = nn.Conv2d(\n",
    "            in_channels=input_channels, \n",
    "            out_channels=output_channel_for_each_path[1], \n",
    "            kernel_size=3, \n",
    "            padding=\"same\"\n",
    "            )\n",
    "        self.conv5_5 = nn.Conv2d(\n",
    "            in_channels=input_channels, \n",
    "            out_channels=output_channel_for_each_path[2], \n",
    "            kernel_size=5, \n",
    "            padding=\"same\"\n",
    "            ) # 5*5卷积核，要same，上下左右各补两圈0\n",
    "        self.max_pooling = nn.MaxPool2d(\n",
    "            kernel_size=2, \n",
    "            stride=2, \n",
    "            )\n",
    "        \n",
    "    def forward(self, x):\n",
    "        conv1_1 = F.relu(self.conv1_1(x))\n",
    "        conv3_3 = F.relu(self.conv3_3(x))\n",
    "        conv5_5 = F.relu(self.conv5_5(x))\n",
    "        max_pooling = self.max_pooling(x) # 最大池化，图像尺寸缩小一半\n",
    "        \n",
    "        max_pooling_shape = max_pooling.shape[1:] #最后3个维度，即通道、高、宽\n",
    "        input_shape = x.shape[1:] # 输入的通道、高、宽\n",
    "        width_padding = (input_shape[-2] - max_pooling_shape[-2]) // 2 # // 输入尺寸，减去池化尺寸，再除以2\n",
    "        height_padding = (input_shape[-1] - max_pooling_shape[-1]) // 2\n",
    "        padded_pooling = F.pad(\n",
    "            max_pooling, \n",
    "            [width_padding, width_padding, height_padding, height_padding]\n",
    "            ) # [left, right, top, bottom]，在每个维度上填充的长度，默认填充最后两个维度，即高和宽，填充0\n",
    "        concat_output = torch.cat(\n",
    "            [conv1_1, conv3_3, conv5_5, padded_pooling], \n",
    "            dim=1\n",
    "            ) # 在通道维度上拼接\n",
    "        return concat_output\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:50.092033Z",
     "iopub.status.busy": "2025-01-21T08:17:50.091777Z",
     "iopub.status.idle": "2025-01-21T08:17:50.105370Z",
     "shell.execute_reply": "2025-01-21T08:17:50.104922Z",
     "shell.execute_reply.started": "2025-01-21T08:17:50.092014Z"
    }
   },
   "source": [
    "class InceptionNet(nn.Module):\n",
    "    def __init__(self, num_classes=10):\n",
    "        super(InceptionNet, self).__init__()\n",
    "        self.model = nn.Sequential(\n",
    "            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=2),#16*16\n",
    "            InceptionBolck(input_channels=32, output_channel_for_each_path=[16, 16, 16]),#80*16*16\n",
    "            InceptionBolck(input_channels=80, output_channel_for_each_path=[16, 16, 16]),#128*16*16\n",
    "            nn.MaxPool2d(kernel_size=2, stride=2),#8*8\n",
    "            InceptionBolck(input_channels=128, output_channel_for_each_path=[16, 16, 16]),#176*8*8\n",
    "            InceptionBolck(input_channels=176, output_channel_for_each_path=[16, 16, 16]),#224*8*8\n",
    "            nn.MaxPool2d(kernel_size=2, stride=2),#224*4*4\n",
    "            nn.Flatten(),\n",
    "            nn.Linear(3584, num_classes)\n",
    "        )\n",
    "        self.init_weights()\n",
    "        \n",
    "    def init_weights(self):\n",
    "        \"\"\"使用 xavier 均匀分布来初始化全连接层、卷积层的权重 W\"\"\"\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, (nn.Linear, nn.Conv2d)):\n",
    "                nn.init.xavier_uniform_(m.weight)\n",
    "                nn.init.zeros_(m.bias)\n",
    "                \n",
    "    def forward(self, x):\n",
    "        return self.model(x)\n",
    "\n",
    "            \n",
    "for key, value in InceptionNet(len(class_names)).named_parameters():\n",
    "    print(f\"{key:^40}paramerters num: {np.prod(value.shape)}\")"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:50.105989Z",
     "iopub.status.busy": "2025-01-21T08:17:50.105818Z",
     "iopub.status.idle": "2025-01-21T08:17:50.109871Z",
     "shell.execute_reply": "2025-01-21T08:17:50.109445Z",
     "shell.execute_reply.started": "2025-01-21T08:17:50.105971Z"
    }
   },
   "source": [
    "224*4*4"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "collapsed": false,
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:50.110593Z",
     "iopub.status.busy": "2025-01-21T08:17:50.110353Z",
     "iopub.status.idle": "2025-01-21T08:17:50.119409Z",
     "shell.execute_reply": "2025-01-21T08:17:50.118971Z",
     "shell.execute_reply.started": "2025-01-21T08:17:50.110575Z"
    },
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "source": [
    "#计算总参数量\n",
    "total_params = sum(p.numel() for p in InceptionNet(len(class_names)).parameters() if p.requires_grad)\n",
    "print(f\"Total params: {total_params:,}\")"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "collapsed": false,
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:50.120038Z",
     "iopub.status.busy": "2025-01-21T08:17:50.119877Z",
     "iopub.status.idle": "2025-01-21T08:17:50.545575Z",
     "shell.execute_reply": "2025-01-21T08:17:50.545104Z",
     "shell.execute_reply.started": "2025-01-21T08:17:50.120020Z"
    },
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "source": [
    "from torchvision.models import inception_v3\n",
    "model=inception_v3()\n",
    "model"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "collapsed": false,
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:50.546407Z",
     "iopub.status.busy": "2025-01-21T08:17:50.546152Z",
     "iopub.status.idle": "2025-01-21T08:17:50.550387Z",
     "shell.execute_reply": "2025-01-21T08:17:50.549859Z",
     "shell.execute_reply.started": "2025-01-21T08:17:50.546387Z"
    },
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "source": [
    "total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "print(f\"Total params: {total_params:,}\")"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "collapsed": false,
    "execution": {
     "iopub.execute_input": "2025-01-21T08:17:50.551205Z",
     "iopub.status.busy": "2025-01-21T08:17:50.551023Z",
     "iopub.status.idle": "2025-01-21T08:18:24.369013Z",
     "shell.execute_reply": "2025-01-21T08:18:24.368508Z",
     "shell.execute_reply.started": "2025-01-21T08:17:50.551187Z"
    },
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "source": [
    "from torchvision.models import vgg19\n",
    "model=vgg19(pretrained=True)\n",
    "model"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "collapsed": false,
    "execution": {
     "iopub.execute_input": "2025-01-21T08:18:24.369868Z",
     "iopub.status.busy": "2025-01-21T08:18:24.369631Z",
     "iopub.status.idle": "2025-01-21T08:18:24.373573Z",
     "shell.execute_reply": "2025-01-21T08:18:24.373010Z",
     "shell.execute_reply.started": "2025-01-21T08:18:24.369848Z"
    },
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "source": [
    "224*4*4"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "collapsed": false,
    "execution": {
     "iopub.execute_input": "2025-01-21T08:18:24.374287Z",
     "iopub.status.busy": "2025-01-21T08:18:24.374093Z",
     "iopub.status.idle": "2025-01-21T08:18:24.380675Z",
     "shell.execute_reply": "2025-01-21T08:18:24.380214Z",
     "shell.execute_reply.started": "2025-01-21T08:18:24.374268Z"
    },
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "source": [
    "#给F.pad写一个例子\n",
    "input_shape = (3, 8, 8)\n",
    "max_pooling_shape = (4, 4)\n",
    "width_padding = (8 - 4) // 2\n",
    "height_padding = (8 - 4) // 2\n",
    "padded_pooling = F.pad(\n",
    "    torch.randn(1, 1,4,4),\n",
    "    [width_padding, width_padding, height_padding, height_padding]\n",
    "    ) # [left, right, top, bottom]，在每个维度上填充的长度，默认填充最后两个维度，即高和宽，填充0\n",
    "padded_pooling"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 定义损失函数和优化器"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "collapsed": false,
    "execution": {
     "iopub.execute_input": "2025-01-21T08:18:24.381292Z",
     "iopub.status.busy": "2025-01-21T08:18:24.381135Z",
     "iopub.status.idle": "2025-01-21T08:18:24.383887Z",
     "shell.execute_reply": "2025-01-21T08:18:24.383326Z",
     "shell.execute_reply.started": "2025-01-21T08:18:24.381275Z"
    },
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "source": [
    "import torch.optim as optim\n",
    "\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练\n",
    "\n",
    "pytorch的训练需要自行实现，包括\n",
    "1. 定义损失函数\n",
    "2. 定义优化器\n",
    "3. 定义训练步\n",
    "4. 训练"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:18:24.384487Z",
     "iopub.status.busy": "2025-01-21T08:18:24.384336Z",
     "iopub.status.idle": "2025-01-21T08:18:24.411153Z",
     "shell.execute_reply": "2025-01-21T08:18:24.410694Z",
     "shell.execute_reply.started": "2025-01-21T08:18:24.384470Z"
    }
   },
   "source": [
    "from sklearn.metrics import accuracy_score\n",
    "\n",
    "@torch.no_grad()\n",
    "def evaluating(model, dataloader, loss_fct):\n",
    "    loss_list = []\n",
    "    pred_list = []\n",
    "    label_list = []\n",
    "    for datas, labels in dataloader:\n",
    "        datas = datas.to(device)\n",
    "        labels = labels.to(device)\n",
    "        # 前向计算\n",
    "        logits = model(datas)\n",
    "        loss = loss_fct(logits, labels)         # 验证集损失\n",
    "        loss_list.append(loss.item())\n",
    "        \n",
    "        preds = logits.argmax(axis=-1)    # 验证集预测\n",
    "        pred_list.extend(preds.cpu().numpy().tolist())\n",
    "        label_list.extend(labels.cpu().numpy().tolist())\n",
    "        \n",
    "    acc = accuracy_score(label_list, pred_list)\n",
    "    return np.mean(loss_list), acc\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### TensorBoard 可视化\n",
    "\n",
    "\n",
    "训练过程中可以使用如下命令启动tensorboard服务。\n",
    "\n",
    "```shell\n",
    "tensorboard \\\n",
    "    --logdir=runs \\     # log 存放路径\n",
    "    --host 0.0.0.0 \\    # ip\n",
    "    --port 8848         # 端口\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:18:24.411961Z",
     "iopub.status.busy": "2025-01-21T08:18:24.411694Z",
     "iopub.status.idle": "2025-01-21T08:18:24.471493Z",
     "shell.execute_reply": "2025-01-21T08:18:24.470962Z",
     "shell.execute_reply.started": "2025-01-21T08:18:24.411942Z"
    }
   },
   "source": [
    "from torch.utils.tensorboard import SummaryWriter\n",
    "\n",
    "\n",
    "class TensorBoardCallback:\n",
    "    def __init__(self, log_dir, flush_secs=10):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            log_dir (str): dir to write log.\n",
    "            flush_secs (int, optional): write to dsk each flush_secs seconds. Defaults to 10.\n",
    "        \"\"\"\n",
    "        self.writer = SummaryWriter(log_dir=log_dir, flush_secs=flush_secs)\n",
    "\n",
    "    def draw_model(self, model, input_shape):\n",
    "        self.writer.add_graph(model, input_to_model=torch.randn(input_shape))\n",
    "        \n",
    "    def add_loss_scalars(self, step, loss, val_loss):\n",
    "        self.writer.add_scalars(\n",
    "            main_tag=\"training/loss\", \n",
    "            tag_scalar_dict={\"loss\": loss, \"val_loss\": val_loss},\n",
    "            global_step=step,\n",
    "            )\n",
    "        \n",
    "    def add_acc_scalars(self, step, acc, val_acc):\n",
    "        self.writer.add_scalars(\n",
    "            main_tag=\"training/accuracy\",\n",
    "            tag_scalar_dict={\"accuracy\": acc, \"val_accuracy\": val_acc},\n",
    "            global_step=step,\n",
    "        )\n",
    "        \n",
    "    def add_lr_scalars(self, step, learning_rate):\n",
    "        self.writer.add_scalars(\n",
    "            main_tag=\"training/learning_rate\",\n",
    "            tag_scalar_dict={\"learning_rate\": learning_rate},\n",
    "            global_step=step,\n",
    "            \n",
    "        )\n",
    "    \n",
    "    def __call__(self, step, **kwargs):\n",
    "        # add loss\n",
    "        loss = kwargs.pop(\"loss\", None)\n",
    "        val_loss = kwargs.pop(\"val_loss\", None)\n",
    "        if loss is not None and val_loss is not None:\n",
    "            self.add_loss_scalars(step, loss, val_loss)\n",
    "        # add acc\n",
    "        acc = kwargs.pop(\"acc\", None)\n",
    "        val_acc = kwargs.pop(\"val_acc\", None)\n",
    "        if acc is not None and val_acc is not None:\n",
    "            self.add_acc_scalars(step, acc, val_acc)\n",
    "        # add lr\n",
    "        learning_rate = kwargs.pop(\"lr\", None)\n",
    "        if learning_rate is not None:\n",
    "            self.add_lr_scalars(step, learning_rate)\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Save Best\n"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:18:24.472466Z",
     "iopub.status.busy": "2025-01-21T08:18:24.472117Z",
     "iopub.status.idle": "2025-01-21T08:18:24.477717Z",
     "shell.execute_reply": "2025-01-21T08:18:24.477248Z",
     "shell.execute_reply.started": "2025-01-21T08:18:24.472444Z"
    }
   },
   "source": [
    "class SaveCheckpointsCallback:\n",
    "    def __init__(self, save_dir, save_step=5000, save_best_only=True):\n",
    "        \"\"\"\n",
    "        Save checkpoints each save_epoch epoch. \n",
    "        We save checkpoint by epoch in this implementation.\n",
    "        Usually, training scripts with pytorch evaluating model and save checkpoint by step.\n",
    "\n",
    "        Args:\n",
    "            save_dir (str): dir to save checkpoint\n",
    "            save_epoch (int, optional): the frequency to save checkpoint. Defaults to 1.\n",
    "            save_best_only (bool, optional): If True, only save the best model or save each model at every epoch.\n",
    "        \"\"\"\n",
    "        self.save_dir = save_dir\n",
    "        self.save_step = save_step\n",
    "        self.save_best_only = save_best_only\n",
    "        self.best_metrics = -1\n",
    "        \n",
    "        # mkdir\n",
    "        if not os.path.exists(self.save_dir):\n",
    "            os.mkdir(self.save_dir)\n",
    "        \n",
    "    def __call__(self, step, state_dict, metric=None):\n",
    "        if step % self.save_step > 0:\n",
    "            return\n",
    "        \n",
    "        if self.save_best_only:\n",
    "            assert metric is not None\n",
    "            if metric >= self.best_metrics:\n",
    "                # save checkpoints\n",
    "                torch.save(state_dict, os.path.join(self.save_dir, \"best.ckpt\"))\n",
    "                # update best metrics\n",
    "                self.best_metrics = metric\n",
    "        else:\n",
    "            torch.save(state_dict, os.path.join(self.save_dir, f\"{step}.ckpt\"))\n",
    "\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Early Stop"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:18:24.478600Z",
     "iopub.status.busy": "2025-01-21T08:18:24.478242Z",
     "iopub.status.idle": "2025-01-21T08:18:24.482533Z",
     "shell.execute_reply": "2025-01-21T08:18:24.482085Z",
     "shell.execute_reply.started": "2025-01-21T08:18:24.478580Z"
    }
   },
   "source": [
    "class EarlyStopCallback:\n",
    "    \"\"\"Track a validation metric and signal when training should stop.\n",
    "\n",
    "    The metric is assumed to be higher-is-better (e.g. accuracy). Training\n",
    "    is considered stalled once the metric has failed to improve by at least\n",
    "    `min_delta` for `patience` consecutive calls.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, patience=5, min_delta=0.01):\n",
    "        \"\"\"\n",
    "\n",
    "        Args:\n",
    "            patience (int, optional): How many consecutive non-improving\n",
    "                evaluations to tolerate before `early_stop` becomes True. Defaults to 5.\n",
    "            min_delta (float, optional): Smallest increase over the best metric\n",
    "                that still counts as an improvement. Defaults to 0.01.\n",
    "        \"\"\"\n",
    "        self.patience = patience\n",
    "        self.min_delta = min_delta\n",
    "        self.best_metric = -1\n",
    "        self.counter = 0\n",
    "\n",
    "    def __call__(self, metric):\n",
    "        \"\"\"Record one evaluation result and update the stall counter.\"\"\"\n",
    "        improved = metric >= self.best_metric + self.min_delta\n",
    "        if not improved:\n",
    "            self.counter += 1\n",
    "            return\n",
    "        # New best: remember it and reset the stall counter.\n",
    "        self.best_metric = metric\n",
    "        self.counter = 0\n",
    "\n",
    "    @property\n",
    "    def early_stop(self):\n",
    "        # True once the metric has stalled for `patience` evaluations in a row.\n",
    "        return self.counter >= self.patience\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:18:24.483343Z",
     "iopub.status.busy": "2025-01-21T08:18:24.483173Z",
     "iopub.status.idle": "2025-01-21T08:19:59.858049Z",
     "shell.execute_reply": "2025-01-21T08:19:59.857482Z",
     "shell.execute_reply.started": "2025-01-21T08:18:24.483325Z"
    }
   },
   "source": [
    "# Training loop\n",
    "def training(\n",
    "    model, \n",
    "    train_loader, \n",
    "    val_loader, \n",
    "    epoch, \n",
    "    loss_fct, \n",
    "    optimizer, \n",
    "    tensorboard_callback=None,\n",
    "    save_ckpt_callback=None,\n",
    "    early_stop_callback=None,\n",
    "    eval_step=500,\n",
    "    ):\n",
    "    \"\"\"Train `model` on `train_loader`, evaluating every `eval_step` steps.\n",
    "\n",
    "    Args:\n",
    "        model: Module, expected to be on `device` already.\n",
    "        train_loader: DataLoader yielding (datas, labels) training batches.\n",
    "        val_loader: DataLoader used by `evaluating` at each eval point.\n",
    "        epoch (int): Number of full passes over `train_loader`.\n",
    "        loss_fct: Loss callable taking (logits, labels).\n",
    "        optimizer: Optimizer over `model.parameters()`.\n",
    "        tensorboard_callback: Optional; called with step/loss/acc/lr at eval points.\n",
    "        save_ckpt_callback: Optional; called with (step, state_dict, metric=val_acc).\n",
    "        early_stop_callback: Optional; when its `.early_stop` turns True,\n",
    "            training returns immediately.\n",
    "        eval_step (int, optional): Evaluate every this many global steps\n",
    "            (including step 0). Defaults to 500.\n",
    "\n",
    "    Returns:\n",
    "        dict: {\"train\": [...], \"val\": [...]} lists of per-step\n",
    "        {\"loss\", \"acc\", \"step\"} records.\n",
    "    \"\"\"\n",
    "    record_dict = {\n",
    "        \"train\": [],\n",
    "        \"val\": []\n",
    "    }\n",
    "    \n",
    "    global_step = 0\n",
    "    model.train()\n",
    "    with tqdm(total=epoch * len(train_loader)) as pbar:\n",
    "        for epoch_id in range(epoch):\n",
    "            # training\n",
    "            for datas, labels in train_loader:\n",
    "                datas = datas.to(device)\n",
    "                labels = labels.to(device)\n",
    "                # Clear gradients accumulated from the previous step\n",
    "                optimizer.zero_grad()\n",
    "                # Forward pass\n",
    "                logits = model(datas)\n",
    "                # Compute the loss\n",
    "                loss = loss_fct(logits, labels)\n",
    "                # Backpropagate gradients\n",
    "                loss.backward()\n",
    "                # Optimizer step (parameter update, lr schedule, etc.)\n",
    "                optimizer.step()\n",
    "                preds = logits.argmax(axis=-1)\n",
    "            \n",
    "                acc = accuracy_score(labels.cpu().numpy(), preds.cpu().numpy())    \n",
    "                loss = loss.cpu().item()\n",
    "                # record\n",
    "                \n",
    "                record_dict[\"train\"].append({\n",
    "                    \"loss\": loss, \"acc\": acc, \"step\": global_step\n",
    "                })\n",
    "                \n",
    "                # evaluating\n",
    "                if global_step % eval_step == 0:\n",
    "                    model.eval()\n",
    "                    val_loss, val_acc = evaluating(model, val_loader, loss_fct)\n",
    "                    record_dict[\"val\"].append({\n",
    "                        \"loss\": val_loss, \"acc\": val_acc, \"step\": global_step\n",
    "                    })\n",
    "                    # switch back to training mode after evaluation\n",
    "                    model.train()\n",
    "                    \n",
    "                    # 1. TensorBoard visualization\n",
    "                    if tensorboard_callback is not None:\n",
    "                        tensorboard_callback(\n",
    "                            global_step, \n",
    "                            loss=loss, val_loss=val_loss,\n",
    "                            acc=acc, val_acc=val_acc,\n",
    "                            lr=optimizer.param_groups[0][\"lr\"],\n",
    "                            )\n",
    "                \n",
    "                    # 2. save model checkpoint\n",
    "                    if save_ckpt_callback is not None:\n",
    "                        save_ckpt_callback(global_step, model.state_dict(), metric=val_acc)\n",
    "\n",
    "                    # 3. Early stop\n",
    "                    if early_stop_callback is not None:\n",
    "                        early_stop_callback(val_acc)\n",
    "                        if early_stop_callback.early_stop:\n",
    "                            print(f\"Early stop at epoch {epoch_id} / global_step {global_step}\")\n",
    "                            return record_dict\n",
    "                    \n",
    "                # update step\n",
    "                global_step += 1\n",
    "                pbar.update(1)\n",
    "                pbar.set_postfix({\"epoch\": epoch_id})\n",
    "        \n",
    "    return record_dict\n",
    "        \n",
    "\n",
    "epoch = 20\n",
    "\n",
    "model = InceptionNet(num_classes=10)\n",
    "\n",
    "# 1. Loss function: cross-entropy\n",
    "loss_fct = nn.CrossEntropyLoss()\n",
    "# 2. Optimizer: RMSprop\n",
    "# Optimizers specified in the torch.optim package\n",
    "optimizer = torch.optim.RMSprop(model.parameters(), lr=0.001, alpha=0.9, eps=1e-07)\n",
    "\n",
    "# 1. TensorBoard visualization\n",
    "if not os.path.exists(\"runs\"):\n",
    "    os.mkdir(\"runs\")\n",
    "    \n",
    "exp_name = \"inception_net\"\n",
    "tensorboard_callback = TensorBoardCallback(f\"runs/{exp_name}\")\n",
    "tensorboard_callback.draw_model(model, [1, 3, IMAGE_SIZE, IMAGE_SIZE])\n",
    "# 2. save best\n",
    "if not os.path.exists(\"checkpoints\"):\n",
    "    os.makedirs(\"checkpoints\")\n",
    "# save_step=len(train_dl): save (at most) once per epoch, keeping only the best\n",
    "save_ckpt_callback = SaveCheckpointsCallback(f\"checkpoints/{exp_name}\", save_step=len(train_dl), save_best_only=True)\n",
    "# 3. early stop\n",
    "early_stop_callback = EarlyStopCallback(patience=5)\n",
    "\n",
    "model = model.to(device)\n",
    "# eval_step=len(train_dl): validate once per epoch\n",
    "record = training(\n",
    "    model, \n",
    "    train_dl, \n",
    "    eval_dl, \n",
    "    epoch, \n",
    "    loss_fct, \n",
    "    optimizer, \n",
    "    tensorboard_callback=tensorboard_callback,\n",
    "    save_ckpt_callback=save_ckpt_callback,\n",
    "    early_stop_callback=early_stop_callback,\n",
    "    eval_step=len(train_dl)\n",
    "    )"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:19:59.861123Z",
     "iopub.status.busy": "2025-01-21T08:19:59.860845Z",
     "iopub.status.idle": "2025-01-21T08:20:00.078331Z",
     "shell.execute_reply": "2025-01-21T08:20:00.077666Z",
     "shell.execute_reply.started": "2025-01-21T08:19:59.861099Z"
    }
   },
   "source": [
    "# Note: the loss curve is not necessarily bounded to [0, 1].\n",
    "def plot_learning_curves(record_dict, sample_step=500):\n",
    "    \"\"\"Plot train/val learning curves from a `training()` record dict.\n",
    "\n",
    "    Args:\n",
    "        record_dict (dict): {\"train\": [...], \"val\": [...]} lists of\n",
    "            per-step {\"loss\", \"acc\", \"step\"} records.\n",
    "        sample_step (int, optional): Keep only every `sample_step`-th train\n",
    "            record to de-noise the train curves. Defaults to 500.\n",
    "    \"\"\"\n",
    "    # build DataFrame\n",
    "    train_df = pd.DataFrame(record_dict[\"train\"]).set_index(\"step\").iloc[::sample_step]\n",
    "    val_df = pd.DataFrame(record_dict[\"val\"]).set_index(\"step\")\n",
    "\n",
    "    # plot: one panel per recorded quantity (loss, acc, ...)\n",
    "    fig_num = len(train_df.columns)\n",
    "    fig, axs = plt.subplots(1, fig_num, figsize=(5 * fig_num, 5))\n",
    "    if fig_num == 1:\n",
    "        # plt.subplots squeezes a 1x1 grid to a bare Axes; keep it indexable\n",
    "        axs = [axs]\n",
    "    for idx, item in enumerate(train_df.columns):    \n",
    "        axs[idx].plot(train_df.index, train_df[item], label=f\"train_{item}\")\n",
    "        axs[idx].plot(val_df.index, val_df[item], label=f\"val_{item}\")\n",
    "        axs[idx].grid()\n",
    "        axs[idx].legend()\n",
    "        axs[idx].set_xlabel(\"step\")\n",
    "    \n",
    "    plt.show()\n",
    "\n",
    "plot_learning_curves(record, sample_step=100)  # x-axis is steps"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 评估"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:20:00.079168Z",
     "iopub.status.busy": "2025-01-21T08:20:00.078970Z",
     "iopub.status.idle": "2025-01-21T08:20:00.806024Z",
     "shell.execute_reply": "2025-01-21T08:20:00.805429Z",
     "shell.execute_reply.started": "2025-01-21T08:20:00.079146Z"
    }
   },
   "source": [
    "# dataload for evaluating\n",
    "\n",
    "# load checkpoints: restore the best weights saved during training.\n",
    "# map_location=\"cpu\" loads the tensors on CPU; load_state_dict then copies\n",
    "# them into the model's existing (possibly CUDA) parameters.\n",
    "model.load_state_dict(torch.load(f\"checkpoints/{exp_name}/best.ckpt\", map_location=\"cpu\"))\n",
    "\n",
    "# evaluate the restored model on the validation split\n",
    "model.eval()\n",
    "loss, acc = evaluating(model, eval_dl, loss_fct)\n",
    "print(f\"loss:     {loss:.4f}\\naccuracy: {acc:.4f}\")"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 推理"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:20:00.806997Z",
     "iopub.status.busy": "2025-01-21T08:20:00.806739Z",
     "iopub.status.idle": "2025-01-21T08:21:58.270987Z",
     "shell.execute_reply": "2025-01-21T08:21:58.270499Z",
     "shell.execute_reply.started": "2025-01-21T08:20:00.806974Z"
    }
   },
   "source": [
    "# Predict on the test set and attach string labels to test_df\n",
    "test_ds = Cifar10Dataset(\"test\", transform=transforms_eval)\n",
    "test_dl = DataLoader(test_ds, batch_size=batch_size, shuffle=False, drop_last=False)\n",
    "\n",
    "preds_collect = []\n",
    "model.eval()\n",
    "# Disable autograd during inference: no graphs are built, saving memory/time.\n",
    "with torch.no_grad():\n",
    "    for data, fake_label in tqdm(test_dl):\n",
    "        data = data.to(device=device)\n",
    "        logits = model(data)\n",
    "        # map predicted class indices back to their label strings\n",
    "        preds = [test_ds.idx_to_label[idx] for idx in logits.argmax(axis=-1).cpu().tolist()]\n",
    "        preds_collect.extend(preds)\n",
    "\n",
    "test_df[\"class\"] = preds_collect\n",
    "test_df.head()"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-21T08:21:58.272012Z",
     "iopub.status.busy": "2025-01-21T08:21:58.271588Z",
     "iopub.status.idle": "2025-01-21T08:21:58.590533Z",
     "shell.execute_reply": "2025-01-21T08:21:58.589993Z",
     "shell.execute_reply.started": "2025-01-21T08:21:58.271992Z"
    },
    "tags": []
   },
   "source": [
    "# Export submission.csv for the Kaggle competition\n",
    "test_df.to_csv(\"submission.csv\", index=False)"
   ],
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
