{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "kBRw5QHhBkax"
   },
   "source": [
    "# 数据集导入(预制链接)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# !wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=8ce890e0-0019-4e1e-ac63-14718948f612&at=APZUnTW-e7sn7C7k5UVU2BaxZPGT%3A1721020888524' -O dataset_1.zip\n",
    "# !wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1izP72pHtoXpQvOV8WFCnh_LekzLunyG5&export=download&authuser=0&confirm=t&uuid=8e453e3d-84ac-4f51-9cbf-45d47cbdcc65&at=APZUnTVfJYZBQwnHawB72aq5MPvv%3A1721020973099' -O dataset_2.zip\n",
    "# !wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1djT0tlmLBi15LYZG0dxci1RSjPI94sM8&export=download&authuser=0&confirm=t&uuid=4687dd5d-a001-47f2-bacd-e72d5c7361e4&at=APZUnTWWEM2OCtpaZNuS4UQjMzxc%3A1721021154071' -O dataset_3.zip\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "gaD7ugivEL2R"
   },
   "source": [
    "## 官方版本数据导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # Unzip dataset file\n",
    "# import os\n",
    "# import shutil\n",
    "\n",
    "# # 指定新文件夹的名称\n",
    "# new_folder_A = 'unziped/dataset_1'\n",
    "# new_folder_B = 'unziped/dataset_2'\n",
    "# new_folder_C = 'unziped/dataset_3'\n",
    "\n",
    "# # 在当前目录下创建新文件夹\n",
    "# if not os.path.exists(new_folder_A):\n",
    "#     os.makedirs(new_folder_A)\n",
    "# if not os.path.exists(new_folder_B):\n",
    "#     os.makedirs(new_folder_B)\n",
     "# if not os.path.exists(new_folder_C):\n",
     "#     os.makedirs(new_folder_C)\n",
    "\n",
    "# # 使用!unzip命令将文件解压到新文件夹中\n",
    "# # !unzip trackCtrain.zip -d {new_folder}\n",
    "# !unzip dataset_1.zip -d {new_folder_A}\n",
    "# !unzip dataset_2.zip -d {new_folder_B}\n",
    "# !unzip dataset_3.zip -d {new_folder_C}\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "z0Sek0wtEs5n"
   },
   "source": [
    "## 百度Baseline版本数据导入"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "kY81z-fCgPfK"
   },
   "source": [
    "## 自定义导入(在下面代码块导入并解压您的数据集)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ITzT8s2wgZG0"
   },
   "source": [
    "下载我们自己的数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1NwodiQ9U67mCx5YHvyepwMtTgcRCDwLj&export=download&authuser=0&confirm=t&uuid=e063e5b4-7f9d-4eea-a310-7122ba3e9d1e&at=AO7h07dsZXetunKyDdnm6s4aFCgX%3A1726554152822' -O train_and_test.zip\n",
    "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1G6S-4FqZEUFNG2b5AvndrOP2UXQPzeDd&export=download&authuser=0' -O src.zip"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Unzip dataset file\n",
    "import os\n",
    "\n",
    "# 指定新文件夹的名称\n",
    "new_folder = 'datasets/converted_dataset'\n",
    "\n",
    "# 在当前目录下创建新文件夹\n",
    "if not os.path.exists(new_folder):\n",
    "    os.makedirs(new_folder)\n",
    "\n",
    "# 使用!unzip命令将文件解压到新文件夹中\n",
    "!unzip train_and_test.zip -d {new_folder}\n",
    "!unzip src.zip"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "PmlOGK6yPVGu"
   },
   "source": [
    "# 包导入规范"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "_Mh7pXUyYOvl"
   },
   "source": [
    "## 直接导入(建议)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "sRLfyacGYGcY"
   },
   "source": [
    "## 通过requirements.txt一次性导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install torch_geometric\n",
    "!pip install --no-index pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://pytorch-geometric.com/whl/torch-2.4.0+cu121.html\n",
    "!pip install -r requirements.txt"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "DUxmPjWWV1sr"
   },
   "source": [
    "# 额外数据导入\n",
    "（此处导入权重文件和额外数据集，在此之外的导入将有被判违规的风险，这里以导入随机生成的Track C的A榜样例提交的zip为例子）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 开始将官方数据集转为h5文件，并计算其他所需特征，例如sdf,normal,ambient_occlusion等\n",
    "# Train_msh_dir='unziped/dataset_1/Dataset/Training_data/Feature'\n",
    "# Train_label_dir='unziped/dataset_1/Dataset/Training_data/Label'\n",
    "# Train_aux_dir='unziped/dataset_1/Dataset/Training_data'\n",
    "# h5_save_path='datasets/trackA'\n",
    "# split='train'\n",
    "# track='A'\n",
    "# # 构建命令行字符串\n",
    "# train_command = f\"python Extract_mesh/parse_dataset.py --msh_dir={Train_msh_dir} --label_dir={Train_label_dir} --aux_dir={Train_aux_dir} --h5_save_path={h5_save_path} --split={split} --track={track}\"\n",
    "# # 在Notebook中执行命令\n",
    "# !{train_command}\n",
    "\n",
    "# Test_msh_dir='unziped/dataset_1/Dataset/Training_data/Feature'\n",
    "# Test_label_dir='unziped/dataset_1/Dataset/Training_data/Label' #实际上没有\n",
    "# Test_aux_dir='unziped/dataset_1/Dataset/Training_data'\n",
    "# h5_save_path='datasets/trackA'\n",
    "# split='test'\n",
    "# track='A'\n",
    "\n",
    "# # 构建命令行字符串\n",
    "# test_command = f\"python Extract_mesh/parse_dataset.py --msh_dir={Test_msh_dir} --label_dir={Test_label_dir} --aux_dir={Test_aux_dir} --h5_save_path={h5_save_path} --split={split} --track={track}\"\n",
    "# # 在Notebook中执行命令\n",
    "# !{test_command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 开始将官方数据集转为h5文件，并计算其他所需特征，例如sdf,normal,ambient_occlusion等\n",
    "# Train_msh_dir='unziped/dataset_3'\n",
    "# Train_label_dir='unziped/dataset_2'\n",
    "# Train_aux_dir='unziped/dataset_1/Dataset/Testset_track_B/Auxiliary'\n",
    "# h5_save_path='datasets/trackB'\n",
    "# split='train'\n",
    "# track='B'\n",
    "# # 构建命令行字符串\n",
    "# train_command = f\"python Extract_mesh/parse_dataset.py --msh_dir={Train_msh_dir} --label_dir={Train_label_dir} --aux_dir={Train_aux_dir} --h5_save_path={h5_save_path} --split={split} --track={track}\"\n",
    "# # 在Notebook中执行命令\n",
    "# !{train_command}\n",
    "\n",
    "# Test_msh_dir='unziped/dataset_1/Dataset/Testset_track_B/Inference'\n",
    "# Test_label_dir='unziped/dataset_1/Dataset/Testset_track_B/Auxiliary' #实际上没有\n",
    "# Test_aux_dir='unziped/dataset_1/Dataset/Testset_track_B/Auxiliary'\n",
    "# h5_save_path='datasets/trackB'\n",
    "# split='test'\n",
    "# track='B'\n",
    "\n",
    "# # 构建命令行字符串\n",
    "# test_command = f\"python Extract_mesh/parse_dataset.py --msh_dir={Test_msh_dir} --label_dir={Test_label_dir} --aux_dir={Test_aux_dir} --h5_save_path={h5_save_path} --split={split} --track={track}\"\n",
    "# # 在Notebook中执行命令\n",
    "# !{test_command}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# trackA_dir = 'datasets/trackA/train.h5'\n",
    "# trackB_dir = 'datasets/trackB/train.h5'\n",
     "# trackC_dir = 'datasets/trackC/train.h5'\n",
     "# os.makedirs(os.path.dirname(trackC_dir), exist_ok=True)\n",
    "\n",
    "# # 构建命令行字符串\n",
    "# test_command = f\"python Extract_mesh/merge_h5.py --A_dir={trackA_dir} --B_dir={trackB_dir} --C_dir={trackC_dir}\"\n",
    "# # 在Notebook中执行命令\n",
    "# !{test_command}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "p7YDBByYeYsB"
   },
   "source": [
    "# 主要库版本检查以及随机种子锁定"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "90Bu7fXRftyr"
   },
   "source": [
    "## 随机种子锁定"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "id": "Y_6xjjJIef1f"
   },
   "outputs": [],
   "source": [
    "# 我们下面指定了种子\n",
    "\n",
    "# import numpy as np\n",
    "# import torch\n",
    "# import tensorflow as tf\n",
    "# import random\n",
    "# import os\n",
    "\n",
    "# def seed_everything(seed=42):\n",
    "#     random.seed(seed)           # Python 内置的随机库\n",
    "#     np.random.seed(seed)        # NumPy 随机数种子\n",
    "#     os.environ['PYTHONHASHSEED'] = str(seed)  # 环境变量\n",
    "\n",
    "#     # TensorFlow 设置\n",
    "#     tf.random.set_seed(seed)\n",
    "#     # 对于单 GPU 或 CPU，以下设置不是必须的\n",
    "#     os.environ['TF_DETERMINISTIC_OPS'] = '1'\n",
    "#     os.environ['TF_CUDNN_DETERMINISTIC'] = '1'\n",
    "\n",
    "#     # PyTorch 设置\n",
    "#     torch.manual_seed(seed)\n",
    "#     torch.cuda.manual_seed(seed)\n",
    "#     torch.cuda.manual_seed_all(seed)  # 如果使用多 GPU\n",
    "#     torch.backends.cudnn.deterministic = True\n",
    "#     torch.backends.cudnn.benchmark = False\n",
    "\n",
    "#     # PaddlePaddle 设置\n",
    "#     paddle.seed(seed)\n",
    "\n",
    "# # 调用函数以设置种子\n",
    "# seed_everything(42)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 联合训练压力场以及速度场，训练所用参数在utils/get_param.py里被设置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Jupyter environment detected. Enabling Open3D WebVisualizer.\n",
      "[Open3D INFO] WebRTC GUI backend enabled.\n",
      "[Open3D INFO] WebRTCWindowSystem: HTTP handshake server disabled.\n",
      "Training traj has been loaded time consuming:0.009559392929077148\n",
      "Simulator model saved at /lvm_data/litianyu/mycode-new/CIKM_car_race/TrackA_cross_attn_submit/Logger/net Attu-FVGN; hs 128;/2024-09-17-15:00:37/states/vel_press_0.state\n",
      "Epoch 0 train loss: 0.002671706517537435\n",
      "Epoch 0 train epoc_press_loss: 0.0\n",
      "Epoch 0 train epoc_vel_loss: 0.002671706517537435\n",
      "Epoch 0 completed in 6.40 seconds\n",
      "Simulator model saved at /lvm_data/litianyu/mycode-new/CIKM_car_race/TrackA_cross_attn_submit/Logger/net Attu-FVGN; hs 128;/2024-09-17-15:00:37/states/vel_press_1.state\n",
      "Epoch 1 train loss: 0.0027229916254679363\n",
      "Epoch 1 train epoc_press_loss: 0.0\n",
      "Epoch 1 train epoc_vel_loss: 0.0027229916254679363\n",
      "Epoch 1 completed in 2.56 seconds\n",
      "Training completed\n"
     ]
    }
   ],
   "source": [
    "import sys\n",
    "import os\n",
    "\n",
    "# 获取当前文件路径（即当前运行环境路径）\n",
    "cwd = os.getcwd()\n",
    "current_path = cwd\n",
    "sys.path.append(current_path)\n",
    "\n",
    "import torch\n",
    "from torch.optim import Adam\n",
    "from dataset.Load_mesh import DatasetFactory\n",
    "from NN.model_importer.importer import FVGN\n",
    "\n",
    "from utils import get_param, scheduler\n",
    "import time\n",
    "from utils.get_param import get_hyperparam\n",
    "from utils.Logger import Logger\n",
    "from utils.losses import LpLoss\n",
    "import random\n",
    "import datetime\n",
    "import numpy as np\n",
    "\n",
    "# configurate parameters\n",
    "params = get_param.params()\n",
    "seed = int(datetime.datetime.now().timestamp())\n",
    "np.random.seed(seed)\n",
    "random.seed(seed)\n",
    "torch.manual_seed(seed)\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "# set dataset dir\n",
    "params.trainset = \"datasets/converted_dataset/train.h5\"\n",
    "params.validset = \"datasets/converted_dataset/test.h5\"\n",
    "params.testset = \"datasets/converted_dataset/test.h5\"\n",
    "\n",
    "\n",
    "# initialize Logger and load model / optimizer if according parameters were given\n",
    "logger = Logger(\n",
    "    get_hyperparam(params),\n",
    "    use_csv=True,\n",
    "    use_tensorboard=False,\n",
    "    params=params,\n",
    "    copy_code=False,\n",
    "    seed=seed,\n",
    ")\n",
    "\n",
    "# initialize Training Dataset\n",
    "start = time.time()\n",
    "datasets_factory = DatasetFactory(\n",
    "    params=params,\n",
    "    train_cd=False,\n",
    "    device=device,\n",
    ")\n",
    "\n",
     "# create dataset object\n",
    "train_dataset, train_loader = datasets_factory.create_trainset(\n",
    "    batch_size=params.batch_size,\n",
    "    num_workers=2,\n",
    "    pin_memory=False,\n",
    "    persistent_workers=True,\n",
    "    subsampling=True,\n",
    ")\n",
    "end = time.time()\n",
    "print(\"Training traj has been loaded time consuming:{0}\".format(end - start))\n",
    "\n",
    "# initialize fluid model\n",
    "vel_press_model = FVGN(params)\n",
    "\n",
    "vel_press_model = vel_press_model.to(device)\n",
    "vel_press_model.train()\n",
    "optimizer = Adam(vel_press_model.parameters(), lr=params.lr)\n",
    "\n",
    "\"\"\" >>> lr scheduler settings >>> \"\"\"\n",
    "before_explr_decay_steps = int(params.n_epochs * 0.7)\n",
    "two_step_scheduler = scheduler.ExpLR(\n",
    "    optimizer, decay_steps=params.n_epochs - before_explr_decay_steps, gamma=1e-4\n",
    ")\n",
    "lr_scheduler = scheduler.GradualStepExplrScheduler(\n",
    "    optimizer,\n",
    "    multiplier=1.0,\n",
    "    milestone=[int(params.n_epochs / 2)],\n",
    "    gamma=0.1,\n",
    "    total_epoch=before_explr_decay_steps,\n",
    "    after_scheduler=two_step_scheduler,\n",
    "    expgamma=1e-2,\n",
    "    decay_steps=params.n_epochs - before_explr_decay_steps,\n",
    "    min_lr=1e-6,\n",
    ")\n",
    "\"\"\" <<< lr scheduler settings <<< \"\"\"\n",
    "\n",
    "params.load_index = 0 if params.load_index is None else params.load_index\n",
    "\n",
    "lp_loss = LpLoss(size_average=True)\n",
    "\n",
    "# 初始化用于收集整个数据集的残差的张量\n",
    "epoc_loss = 0\n",
    "\n",
    "# training loop\n",
    "for epoch in range(params.n_epochs):\n",
    "\n",
    "    vel_press_model.train()\n",
    "    epoc_loss = 0\n",
    "    epoc_loss_vel = 0\n",
    "    epoc_loss_press = 0\n",
    "    start = time.time()\n",
    "\n",
    "    for batch_index, graph_node in enumerate(train_loader):\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        graph_node = train_dataset.datapreprocessing(\n",
    "            graph_node.to(device), is_training=True\n",
    "        )\n",
    "\n",
    "        pred_vel, pred_press, pred_pos = vel_press_model(\n",
    "            graph_node=graph_node,\n",
    "            params=params,\n",
    "            target=\"vel_press\",\n",
    "        )\n",
    "\n",
    "        loss_vel = lp_loss(\n",
    "            pred_vel,\n",
    "            graph_node.norm_velocity,\n",
    "            batch=graph_node.batch,\n",
    "            mask=graph_node.mask_vel,\n",
    "        )\n",
    "        loss_press = lp_loss(\n",
    "            pred_press,\n",
    "            graph_node.norm_pressure,\n",
    "            batch=graph_node.batch,\n",
    "            mask=graph_node.mask_press,\n",
    "        )\n",
    "        loss_pos = lp_loss(\n",
    "            pred_pos,\n",
    "            graph_node.pos,\n",
    "            batch=graph_node.batch,\n",
    "            mask=graph_node.mask_pos,\n",
    "        )  # 这里是因为cd已经在前向过程被平均了，因此可以直接取mask\n",
    "        \n",
    "        loss = loss_vel + loss_press + loss_pos\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        epoc_loss_vel += loss_vel.cpu().item()\n",
    "        epoc_loss_press += loss_press.cpu().item()\n",
    "\n",
    "    epoc_loss_vel = epoc_loss_vel / len(train_loader)\n",
    "    epoc_loss_press = epoc_loss_press / len(train_loader)\n",
    "    epoc_loss = epoc_loss_vel + epoc_loss_press\n",
    "    \n",
    "    model_saving_path = logger.save_state(\n",
    "        model=vel_press_model,\n",
    "        optimizer=optimizer,\n",
    "        scheduler=lr_scheduler,\n",
    "        index=f\"vel_press_{epoch}\",\n",
    "    )\n",
    "    \n",
     "    print(f\"Epoch {epoch} train loss: {epoc_loss}\")\n",
     "    print(f\"Epoch {epoch} train epoc_press_loss: {epoc_loss_press}\")\n",
     "    print(f\"Epoch {epoch} train epoc_vel_loss: {epoc_loss_vel}\")\n",
     "    print(f\"Epoch {epoch} completed in {time.time() - start:.2f} seconds\")\n",
     "\n",
     "    # early stop after logging this epoch's stats\n",
     "    if epoch == 105:\n",
     "        break\n",
     "\n",
     "    lr_scheduler.step()\n",
    "\n",
    "print(\"Training completed\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "MBCalHN7bBji"
   },
   "source": [
    "# 推理输出测试集的vel和press案例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "ename": "FileNotFoundError",
     "evalue": "[Errno 2] No such file or directory: 'Logger/net Attu-FVGN; hs 128;/2024-09-17-00:46:18/states/vel_press_100.state'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[11], line 37\u001b[0m\n\u001b[1;32m     33\u001b[0m vel_press_model\u001b[38;5;241m.\u001b[39meval()\n\u001b[1;32m     35\u001b[0m lp_loss \u001b[38;5;241m=\u001b[39m LpLoss(size_average\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m---> 37\u001b[0m params\u001b[38;5;241m.\u001b[39mload_date_time, params\u001b[38;5;241m.\u001b[39mload_index \u001b[38;5;241m=\u001b[39m \u001b[43mlogger\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_state\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m     38\u001b[0m \u001b[43m    \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mvel_press_model\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m     39\u001b[0m \u001b[43m    \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m     40\u001b[0m \u001b[43m    \u001b[49m\u001b[43mscheduler\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m     41\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdatetime\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mparams\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_date_time\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m     42\u001b[0m \u001b[43m    \u001b[49m\u001b[43mindex\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mparams\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_index\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m     43\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdevice\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m     44\u001b[0m \u001b[43m)\u001b[49m\n\u001b[1;32m     45\u001b[0m params\u001b[38;5;241m.\u001b[39mload_index \u001b[38;5;241m=\u001b[39m params\u001b[38;5;241m.\u001b[39mload_index\n\u001b[1;32m     46\u001b[0m 
\u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mloaded: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mparams\u001b[38;5;241m.\u001b[39mload_date_time\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m, \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mparams\u001b[38;5;241m.\u001b[39mload_index\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n",
      "File \u001b[0;32m/lvm_data/litianyu/mycode-new/CIKM_car_race/TrackA_submit/utils/Logger.py:304\u001b[0m, in \u001b[0;36mLogger.load_state\u001b[0;34m(self, model, optimizer, scheduler, datetime, index, continue_datetime, device)\u001b[0m\n\u001b[1;32m    300\u001b[0m         \u001b[38;5;28;01mbreak\u001b[39;00m\n\u001b[1;32m    302\u001b[0m path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mLogger/\u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m/\u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m/states/\u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m.state\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname, datetime, index)\n\u001b[0;32m--> 304\u001b[0m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_checkpoint\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    305\u001b[0m \u001b[43m    \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mscheduler\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mscheduler\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mckpdir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdevice\u001b[49m\n\u001b[1;32m    306\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    308\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m datetime, index\n",
      "File \u001b[0;32m/lvm_data/litianyu/mycode-new/CIKM_car_race/TrackA_submit/NN/model_importer/importer.py:57\u001b[0m, in \u001b[0;36mFVGN.load_checkpoint\u001b[0;34m(self, optimizer, scheduler, ckpdir, device, is_training)\u001b[0m\n\u001b[1;32m     55\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ckpdir \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m     56\u001b[0m     ckpdir \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodel_dir\n\u001b[0;32m---> 57\u001b[0m dicts \u001b[38;5;241m=\u001b[39m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload\u001b[49m\u001b[43m(\u001b[49m\u001b[43mckpdir\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmap_location\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     58\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mload_state_dict(dicts[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[1;32m     59\u001b[0m keys \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(dicts\u001b[38;5;241m.\u001b[39mkeys())\n",
      "File \u001b[0;32m~/miniconda3/envs/pt113cu116/lib/python3.10/site-packages/torch/serialization.py:771\u001b[0m, in \u001b[0;36mload\u001b[0;34m(f, map_location, pickle_module, weights_only, **pickle_load_args)\u001b[0m\n\u001b[1;32m    768\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mencoding\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m pickle_load_args\u001b[38;5;241m.\u001b[39mkeys():\n\u001b[1;32m    769\u001b[0m     pickle_load_args[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mencoding\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mutf-8\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m--> 771\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[43m_open_file_like\u001b[49m\u001b[43m(\u001b[49m\u001b[43mf\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mrb\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m opened_file:\n\u001b[1;32m    772\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m _is_zipfile(opened_file):\n\u001b[1;32m    773\u001b[0m         \u001b[38;5;66;03m# The zipfile reader is going to advance the current file position.\u001b[39;00m\n\u001b[1;32m    774\u001b[0m         \u001b[38;5;66;03m# If we want to actually tail call to torch.jit.load, we need to\u001b[39;00m\n\u001b[1;32m    775\u001b[0m         \u001b[38;5;66;03m# reset back to the original position.\u001b[39;00m\n\u001b[1;32m    776\u001b[0m         orig_position \u001b[38;5;241m=\u001b[39m opened_file\u001b[38;5;241m.\u001b[39mtell()\n",
      "File \u001b[0;32m~/miniconda3/envs/pt113cu116/lib/python3.10/site-packages/torch/serialization.py:270\u001b[0m, in \u001b[0;36m_open_file_like\u001b[0;34m(name_or_buffer, mode)\u001b[0m\n\u001b[1;32m    268\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_open_file_like\u001b[39m(name_or_buffer, mode):\n\u001b[1;32m    269\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m _is_path(name_or_buffer):\n\u001b[0;32m--> 270\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_open_file\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname_or_buffer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    271\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    272\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mw\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;129;01min\u001b[39;00m mode:\n",
      "File \u001b[0;32m~/miniconda3/envs/pt113cu116/lib/python3.10/site-packages/torch/serialization.py:251\u001b[0m, in \u001b[0;36m_open_file.__init__\u001b[0;34m(self, name, mode)\u001b[0m\n\u001b[1;32m    250\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m, name, mode):\n\u001b[0;32m--> 251\u001b[0m     \u001b[38;5;28msuper\u001b[39m(_open_file, \u001b[38;5;28mself\u001b[39m)\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__init__\u001b[39m(\u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m)\u001b[49m)\n",
      "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'Logger/net Attu-FVGN; hs 128;/2024-09-17-00:46:18/states/vel_press_100.state'"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from dataset.Load_mesh import DatasetFactory\n",
    "import time\n",
    "\n",
    "# # configurate parameters\n",
     "params.load_index = \"vel_press_100\"\n",
    "params.load_date_time=logger.datetime\n",
    "\n",
    "# initialize Logger and load model / optimizer if according parameters were given\n",
    "logger = Logger(\n",
    "    get_hyperparam(params),\n",
    "    datetime=params.load_date_time,\n",
    "    use_csv=False,\n",
    "    use_tensorboard=False,\n",
    "    copy_code=False,\n",
    ")\n",
    "\n",
     "# create dataset object\n",
    "test_dataset, test_loader = datasets_factory.create_testset(\n",
    "    batch_size=1,\n",
    "    num_workers=0,\n",
    "    pin_memory=False,\n",
    "    persistent_workers=False,\n",
    "    subsampling=False,\n",
    ")\n",
    "\n",
    "# initialize fluid model again\n",
    "from NN.model_importer.importer import FVGN\n",
    "\n",
    "vel_press_model = FVGN(params)\n",
    "\n",
    "vel_press_model = vel_press_model.to(device)\n",
    "vel_press_model.eval()\n",
    "\n",
    "lp_loss = LpLoss(size_average=True)\n",
    "\n",
    "params.load_date_time, params.load_index = logger.load_state(\n",
    "    model=vel_press_model,\n",
    "    optimizer=None,\n",
    "    scheduler=None,\n",
    "    datetime=params.load_date_time,\n",
    "    index=params.load_index,\n",
    "    device=device,\n",
    ")\n",
    "params.load_index = params.load_index\n",
    "print(f\"loaded: {params.load_date_time}, {params.load_index}\")\n",
    "params.load_index = 0 if params.load_index is None else params.load_index\n",
    "\n",
    "start = time.time()\n",
    "with torch.no_grad():\n",
    "\n",
    "    epoc_test_loss = 0\n",
    "    cd_pair_list = []\n",
    "    for batch_index, graph_node in enumerate(test_loader):\n",
    "\n",
    "        graph_node = test_dataset.datapreprocessing(\n",
    "            graph_node.to(device),\n",
    "            is_training=False,\n",
    "        )\n",
    "\n",
    "        pred_vel, pred_press, _ = vel_press_model(\n",
    "            graph_node=graph_node,\n",
    "            target=\"vel_press\",\n",
    "            params=params,\n",
    "        )\n",
    "\n",
    "        current_files_name = \"\".join(\n",
    "            chr(code) for code in graph_node.origin_id.cpu().tolist()\n",
    "        )\n",
    "\n",
    "        if current_files_name.endswith(\".vtk\"):\n",
    "\n",
    "            reversed_node_vel = pred_vel\n",
    "            reversed_vel_label = graph_node.norm_velocity\n",
    "\n",
    "            logger.save_test_results_npy(\n",
    "                value=reversed_node_vel.cpu().detach().squeeze().numpy(),\n",
    "                file_name=current_files_name.split(\".\")[0],\n",
    "            )\n",
    "\n",
    "        elif current_files_name.endswith(\".ply\"):\n",
    "            reversed_node_press = (\n",
    "                pred_press * graph_node.phi_std[0, 0]\n",
    "            ) + graph_node.phi_mean[0, 0]\n",
    "\n",
    "            reversed_press_label = (\n",
    "                graph_node.norm_pressure * graph_node.phi_std[0, 0]\n",
    "            ) + graph_node.phi_mean[0, 0]\n",
    "\n",
    "            logger.save_test_results_npy(\n",
    "                value=reversed_node_press.cpu().detach().squeeze().numpy(),\n",
    "                file_name=current_files_name.replace(\"mesh_\", \"press_\").split(\".\")[0],\n",
    "            )\n",
    "\n",
     "print(f\"Generating answer completed in {time.time() - start:.2f} seconds\")\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练cd数据集，会用到我们自定义的数据集，其从原始cd论文中筛选了额外的一些case加入比赛官方给的数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training traj has been loaded time consuming:0.10782432556152344\n",
      "Simulator model saved at /lvm_data/litianyu/mycode-new/CIKM_car_race/TrackA_cross_attn_submit/Logger/net Attu-FVGN; hs 128;/2024-09-17-15:00:37/states/cd_0.state\n",
      "Epoch 0 train epoc_loss_cd: 0.1360441163128269\n",
      "Epoch 0 completed in 126.49 seconds\n",
      "Simulator model saved at /lvm_data/litianyu/mycode-new/CIKM_car_race/TrackA_cross_attn_submit/Logger/net Attu-FVGN; hs 128;/2024-09-17-15:00:37/states/cd_1.state\n",
      "Epoch 1 train epoc_loss_cd: 0.11731438338756561\n",
      "Epoch 1 completed in 127.26 seconds\n",
      "Training completed\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# cd数据集需要更低的学习率\n",
    "params.lr=3e-5\n",
    "params.dataset_size=825\n",
    "params.trainset=\"datasets/converted_dataset/train_sourcepaper_cd_filtered_825.h5\"\n",
    "\n",
    "# reinitialize Training Dataset\n",
    "start = time.time()\n",
    "datasets_factory = DatasetFactory(\n",
    "    params=params,\n",
    "    train_cd=True,\n",
    "    device=device,\n",
    ")\n",
    "\n",
     "# create dataset object\n",
    "train_dataset, train_loader = datasets_factory.create_trainset(\n",
    "    batch_size=params.batch_size,\n",
    "    num_workers=2,\n",
    "    pin_memory=False,\n",
    "    persistent_workers=True,\n",
    "    subsampling=True,\n",
    ")\n",
    "end = time.time()\n",
    "print(\"Training traj has been loaded time consuming:{0}\".format(end - start))\n",
    "\n",
    "# reinitialize fluid model\n",
    "model = FVGN(params)\n",
    "cd_model = model.to(device)\n",
    "cd_model.train()\n",
    "optimizer = Adam(cd_model.parameters(), lr=params.lr)\n",
    "\n",
    "\"\"\" >>> lr scheduler settings >>> \"\"\"\n",
    "before_explr_decay_steps = int(params.n_epochs * 0.7)\n",
    "two_step_scheduler = scheduler.ExpLR(\n",
    "    optimizer, decay_steps=params.n_epochs - before_explr_decay_steps, gamma=1e-4\n",
    ")\n",
    "lr_scheduler = scheduler.GradualStepExplrScheduler(\n",
    "    optimizer,\n",
    "    multiplier=1.0,\n",
    "    milestone=[int(params.n_epochs / 2)],\n",
    "    gamma=0.1,\n",
    "    total_epoch=before_explr_decay_steps,\n",
    "    after_scheduler=two_step_scheduler,\n",
    "    expgamma=1e-2,\n",
    "    decay_steps=params.n_epochs - before_explr_decay_steps,\n",
    "    min_lr=1e-6,\n",
    ")\n",
    "\"\"\" <<< lr scheduler settings <<< \"\"\"\n",
    "\n",
    "params.load_index = 0 if params.load_index is None else params.load_index\n",
    "\n",
    "lp_loss = LpLoss(size_average=True)\n",
    "\n",
    "# 初始化用于收集整个数据集的残差的张量\n",
    "epoc_loss = 0\n",
    "\n",
    "# training loop\n",
    "for epoch in range(params.n_epochs):\n",
    "\n",
    "    cd_model.train()\n",
    "    epoc_loss_cd = 0\n",
    "    start = time.time()\n",
    "    for batch_index, graph_node in enumerate(train_loader):\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        graph_node = train_dataset.datapreprocessing(\n",
    "            graph_node.to(device), is_training=True\n",
    "        )\n",
    "        _,_,pred_cd = cd_model(\n",
    "            graph_node=graph_node,\n",
    "            params=params,\n",
    "            target=\"cd\",\n",
    "        )\n",
    "        \n",
    "        loss_cd = lp_loss(\n",
    "            pred_cd,\n",
    "            graph_node.cd_data,\n",
    "            dim=1,\n",
    "        )  # 这里是因为cd已经在前向过程被平均了，因此可以直接取mask\n",
    "        \n",
    "        loss = loss_cd\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        epoc_loss_cd += loss_cd.cpu().item()\n",
    "\n",
    "    epoc_loss_cd = epoc_loss_cd / len(train_loader)\n",
    "    \n",
    "    model_saving_path = logger.save_state(\n",
    "        model=cd_model,\n",
    "        optimizer=optimizer,\n",
    "        scheduler=lr_scheduler,\n",
    "        index=f\"cd_{epoch}\",\n",
    "        dump_para=False,\n",
    "    )\n",
    "    \n",
    "    if epoch == 120:\n",
    "        break\n",
    "\n",
    "    print(f\"Epoch {epoch} train epoc_loss_cd: {epoc_loss_cd}\")\n",
    "    print(f\"Epoch {epoch} completed in {time.time() - start:.2f} seconds\")\n",
    "\n",
    "    lr_scheduler.step()\n",
    "\n",
    "print(\"Training completed\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 推理输出cd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Simulator model and optimizer/scheduler loaded checkpoint Logger/net Attu-FVGN; hs 128;/2024-09-17-15:00:37/states/cd_0.state\n",
      "loaded: 2024-09-17-15:00:37, cd_0\n",
      "Results saved to Answer.csv\n",
      "Generating answer completed completed in 7.73 seconds\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from dataset.Load_mesh import DatasetFactory\n",
    "import time\n",
    "\n",
    "# Configure which checkpoint to load for inference\n",
    "params.load_index = \"cd_113\"\n",
    "params.load_date_time = logger.datetime\n",
    "\n",
    "# Create the test dataset object and loader\n",
    "test_dataset, test_loader = datasets_factory.create_testset(\n",
    "    batch_size=1,\n",
    "    num_workers=0,\n",
    "    pin_memory=False,\n",
    "    persistent_workers=False,\n",
    "    subsampling=False,\n",
    ")\n",
    "\n",
    "# Initialize a fresh fluid model and load the trained weights into it\n",
    "cd_model = FVGN(params)\n",
    "cd_model = cd_model.to(device)\n",
    "cd_model.eval()\n",
    "\n",
    "params.load_date_time, params.load_index = logger.load_state(\n",
    "    model=cd_model,\n",
    "    optimizer=None,\n",
    "    scheduler=None,\n",
    "    datetime=params.load_date_time,\n",
    "    index=params.load_index,\n",
    "    device=device,\n",
    ")\n",
    "print(f\"loaded: {params.load_date_time}, {params.load_index}\")\n",
    "params.load_index = 0 if params.load_index is None else params.load_index\n",
    "\n",
    "start = time.time()\n",
    "with torch.no_grad():\n",
    "\n",
    "    # One slot per test case (50 total); slots are overwritten with\n",
    "    # predicted cd values below, keyed by graph_node.idx.\n",
    "    cd_list = list(range(50))\n",
    "    for batch_index, graph_node in enumerate(test_loader):\n",
    "\n",
    "        graph_node = test_dataset.datapreprocessing(\n",
    "            graph_node.to(device),\n",
    "            is_training=False,\n",
    "        )\n",
    "\n",
    "        _, _, pred_cd = cd_model(\n",
    "            graph_node=graph_node,\n",
    "            target=\"cd\",\n",
    "            params=params,\n",
    "        )\n",
    "\n",
    "        # The original file name is transported as a tensor of character codes\n",
    "        current_files_name = \"\".join(\n",
    "            chr(code) for code in graph_node.origin_id.cpu().tolist()\n",
    "        )\n",
    "\n",
    "        if current_files_name.endswith(\".obj\"):\n",
    "            cd_list[graph_node.idx.cpu().item()] = pred_cd.cpu().item()\n",
    "\n",
    "    logger.save_test_results_csv(cd_list, file_name=\"Answer.csv\")\n",
    "\n",
    "print(f\"Generating answer completed in {time.time() - start:.2f} seconds\")\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "_R4ymI9BcKYb",
    "outputId": "d11335d3-d016-48bc-ce76-daa035fe8503"
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import zipfile\n",
    "\n",
    "# Directory whose contents will be archived\n",
    "dir_to_zip = f\"{logger.saving_path}/gen_answers_A\"\n",
    "# Name and absolute path of the archive to create in the working directory\n",
    "zip_filename = \"Results.zip\"\n",
    "zip_filepath = os.path.join(os.getcwd(), zip_filename)\n",
    "\n",
    "# Walk the directory tree and add every file, storing paths relative to\n",
    "# dir_to_zip so the archive layout mirrors the source folder.\n",
    "with zipfile.ZipFile(zip_filepath, 'w', zipfile.ZIP_DEFLATED) as archive:\n",
    "    for root, _dirs, filenames in os.walk(dir_to_zip):\n",
    "        for name in filenames:\n",
    "            full_path = os.path.join(root, name)\n",
    "            archive.write(full_path, os.path.relpath(full_path, dir_to_zip))\n",
    "\n",
    "print(f\"Files from {dir_to_zip} have been compressed into {zip_filepath}\")\n"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "gpuType": "T4",
   "machine_shape": "hm",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
