{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "针对训练如何从断点重新开始"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "自动产生CheckPoints\n",
    "保存检查点：\n",
    "在训练过程中，您可以使用Ktrain的autofit或fit方法，并通过设置checkpoint_folder参数来自动保存检查点。Ktrain会自动在每个epoch结束时保存一个检查点到指定的文件夹。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import ktrain\n",
    "from ktrain import vision as kv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assumes the data and model have already been prepared\n",
    "# ...\n",
    "\n",
    "# Train with autofit; ktrain saves a checkpoint into the given folder after each epoch\n",
    "learner.autofit(train_data, val_data, epochs=10, checkpoint_folder='/path/to/checkpoint_folder')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "如果不使用Ktrain的API，那么这个功能的实现会非常麻烦"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def save_checkpoint(\n",
    "    model,\n",
    "    optimizer,\n",
    "    lr_scheduler,\n",
    "    epoch,\n",
    "    args,\n",
    "    output_dir,\n",
    "    filename=\"checkpoint.pth\",\n",
    "    scaler=None,\n",
    "):\n",
    "    \"\"\"\n",
    "    保存模型的检查点。\n",
    "    Args:\n",
    "        model (nn.Module): 训练好的模型。\n",
    "        optimizer (Optimizer): 优化器。\n",
    "        lr_scheduler (LRScheduler): 学习率调度器。\n",
    "        epoch (int): 当前训练的轮次。\n",
    "        args (argparse.Namespace): 训练参数。\n",
    "        output_dir (str): 输出目录。\n",
    "        filename (str, optional): 检查点文件名。默认为'checkpoint.pth'。\n",
    "        scaler (GradScaler, optional): 如果使用了自动混合精度训练，则包含GradScaler的状态字典。默认为None。\n",
    "    \"\"\"\n",
    "    checkpoint = {\n",
    "        \"model\": model.state_dict(),\n",
    "        \"optimizer\": optimizer.state_dict(),\n",
    "        \"lr_scheduler\": lr_scheduler.state_dict(),\n",
    "        \"args\": vars(args),  # 使用vars将argparse.Namespace转换为字典\n",
    "        \"epoch\": epoch,\n",
    "    }\n",
    "    if scaler is not None:\n",
    "        checkpoint[\"scaler\"] = scaler.state_dict()\n",
    "\n",
    "    torch.save(checkpoint, os.path.join(output_dir, filename))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "加载保存点"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create a new Learner object and load the previously saved checkpoint\n",
    "\n",
    "learner = ktrain.get_learner(\n",
    "    model,\n",
    "    train_data=train_data,\n",
    "    val_data=val_data,\n",
    "    checkpoint_folder=\"/path/to/checkpoint_folder\",\n",
    ")\n",
    "\n",
    "# NOTE(review): checkpoint_number_or_name is a placeholder -- supply a real value\n",
    "learner.load_checkpoint(checkpoint_number_or_name, weights_only=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Resume training from the loaded checkpoint\n",
    "learner.autofit(train_data, val_data, epochs=10)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "但是如果不使用Ktrain，那么这个读取的办法会很繁杂"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_checkpoint(\n",
    "    model,\n",
    "    optimizer=None,\n",
    "    lr_scheduler=None,\n",
    "    checkpoint_path=\"checkpoint.pth\",\n",
    "    device=\"cuda\",\n",
    "):\n",
    "    \"\"\"\n",
    "    从检查点加载模型的状态字典。\n",
    "    Args:\n",
    "        model (nn.Module): 需要加载状态的模型。\n",
    "        optimizer (Optimizer, optional): 如果需要，也可以加载优化器的状态字典。默认为None。\n",
    "        lr_scheduler (LRScheduler, optional): 如果需要，也可以加载学习率调度器的状态字典。默认为None。\n",
    "        checkpoint_path (str, optional): 检查点文件的路径。默认为'checkpoint.pth'。\n",
    "        device (str, optional): 设备名称，用于将模型移动到该设备上。默认为'cuda'。\n",
    "\n",
    "    Returns:\n",
    "        tuple: 包含 (model, epoch, optimizer_state_dict, lr_scheduler_state_dict, scaler_state_dict) 的元组。\n",
    "               optimizer_state_dict, lr_scheduler_state_dict, 和 scaler_state_dict 可能是 None。\n",
    "    \"\"\"\n",
    "    checkpoint = torch.load(checkpoint_path, map_location=device)\n",
    "    model.load_state_dict(checkpoint[\"model\"])\n",
    "    epoch = checkpoint[\"epoch\"]\n",
    "    optimizer_state_dict = checkpoint.get(\"optimizer\", None)\n",
    "    lr_scheduler_state_dict = checkpoint.get(\"lr_scheduler\", None)\n",
    "    scaler_state_dict = checkpoint.get(\"scaler\", None)\n",
    "\n",
    "    if optimizer is not None and optimizer_state_dict is not None:\n",
    "        optimizer.load_state_dict(optimizer_state_dict)\n",
    "    if lr_scheduler is not None and lr_scheduler_state_dict is not None:\n",
    "        lr_scheduler.load_state_dict(lr_scheduler_state_dict)\n",
    "\n",
    "    return (\n",
    "        model,\n",
    "        epoch,\n",
    "        optimizer_state_dict,\n",
    "        lr_scheduler_state_dict,\n",
    "        scaler_state_dict,\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "一个完整的模型加载与保存的例子"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import TensorDataset, DataLoader\n",
    "import argparse\n",
    "import os\n",
    "\n",
    "# Define a minimal linear-regression model\n",
    "class SimpleModel(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(SimpleModel, self).__init__()\n",
    "        self.fc = nn.Linear(1, 1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.fc(x)\n",
    "\n",
    "# Synthetic training data: y = 3x + 2 plus noise\n",
    "x_train = torch.randn(100, 1)\n",
    "y_train = 3 * x_train + 2 + torch.randn(100, 1) * 0.1\n",
    "\n",
    "# Dataset and data loader\n",
    "dataset = TensorDataset(x_train, y_train)\n",
    "dataloader = DataLoader(dataset, batch_size=10, shuffle=True)\n",
    "\n",
    "# BUGFIX: the model must exist BEFORE the optimizer references its parameters\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "model = SimpleModel().to(device)\n",
    "\n",
    "# Loss function, optimizer, and an LR scheduler to include in the checkpoint\n",
    "criterion = nn.MSELoss()\n",
    "optimizer = optim.SGD(model.parameters(), lr=0.01)\n",
    "lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10)\n",
    "\n",
    "# Simplified training loop\n",
    "def train(model, device, dataloader, criterion, optimizer, num_epochs=10):\n",
    "    \"\"\"Run num_epochs of plain SGD training over dataloader.\"\"\"\n",
    "    for epoch in range(num_epochs):\n",
    "        for inputs, targets in dataloader:\n",
    "            inputs, targets = inputs.to(device), targets.to(device)\n",
    "\n",
    "            # forward pass\n",
    "            outputs = model(inputs)\n",
    "            loss = criterion(outputs, targets)\n",
    "\n",
    "            # backward pass and parameter update\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "# Train for one epoch, then save a checkpoint\n",
    "train(model, device, dataloader, criterion, optimizer, num_epochs=1)\n",
    "os.makedirs(\"output_dir\", exist_ok=True)  # ensure the target directory exists\n",
    "# pass a Namespace (not a plain dict) so vars(args) inside save_checkpoint works\n",
    "save_checkpoint(model, optimizer, lr_scheduler, 0, argparse.Namespace(), \"output_dir\", filename=\"model_checkpoint.pth\")\n",
    "\n",
    "# Reload from the checkpoint we actually wrote (the default 'checkpoint.pth' path would be wrong here)\n",
    "model = SimpleModel()  # fresh model instance\n",
    "optimizer = optim.SGD(model.parameters(), lr=0.01)\n",
    "loaded_model, start_epoch, optimizer_state_dict, _, _ = load_checkpoint(\n",
    "    model,\n",
    "    optimizer=optimizer,  # optimizer state is restored inside load_checkpoint\n",
    "    checkpoint_path=os.path.join(\"output_dir\", \"model_checkpoint.pth\"),\n",
    "    device=device,  # works on CPU-only machines too (the old default was 'cuda')\n",
    ")\n",
    "loaded_model = loaded_model.to(device)  # move restored weights to the training device\n",
    "\n",
    "# Continue training for 9 more epochs after the restored one\n",
    "train(loaded_model, device, dataloader, criterion, optimizer, num_epochs=9)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
