{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 198,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import ConcatDataset,DataLoader\n",
    "from torch import nn, optim\n",
    "import torch\n",
    "import datetime\n",
    "from tqdm.notebook import tqdm\n",
    "import os\n",
    "#from cascade_lib import *\n",
    "from cascade_libary.networkv1 import create_ArcMultiTaskModel\n",
    "from cascade_libary.dataset import create_exampler_datasets_and_loaders, create_task_datasets_and_loaders,MyDataset,transform_test,transform_train\n",
    "from cascade_libary.train_eval import train_model, evaluate_model\n",
    "from cascade_libary.criterian import get_class_stats,sort_and_complete_alpha_by_label_mapping,MultiClassFocalLossWithAlpha\n",
    "from cascade_libary.utils import freeze_layers\n",
    "from torchvision import datasets as torchvision_datasets\n",
    "import numpy as np\n",
    "import itertools\n",
    "from kornia.losses import FocalLoss\n",
    "import math\n",
    "import torch.nn.functional as F\n",
    "import random\n",
    "\n",
    "#from kornia.losses import FocalLoss\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 199,
   "metadata": {},
   "outputs": [],
   "source": [
    "def reweight_alphas(task_class_orders, tsk,alpha_cls, alpha_tsk,w_cls, w_tsk):\n",
    "    \"\"\"\n",
    "    更新alpha_cls和alpha_tsk的函数。\n",
    "    \n",
    "    参数:\n",
    "    task_class_orders -- 任务中类别的顺序列表。\n",
    "    tsk -- 我们关注的是第tsk个任务（索引从1开始）。\n",
    "    w_cls -- alpha_cls需要乘以的权重。\n",
    "    w_tsk -- alpha_tsk需要乘以的权重。\n",
    "    \n",
    "    返回:\n",
    "    更新后的alpha_cls和alpha_tsk列表。\n",
    "    \"\"\"\n",
    "    # 计算第tsk个任务之前的所有任务的类别总数\n",
    "\n",
    "    num_prev_task_classes = sum(len(task) for task in task_class_orders[:tsk])\n",
    "    print(\"num_prev_task_classes:\", num_prev_task_classes)\n",
    "    # 更新alpha_cls\n",
    "    for i in range(num_prev_task_classes):\n",
    "        alpha_cls[i] *= w_cls\n",
    "    \n",
    "    # 更新alpha_tsk\n",
    "    for i in range(tsk):\n",
    "        alpha_tsk[i] *= w_tsk\n",
    "    \n",
    "    return alpha_cls, alpha_tsk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 200,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置随机种子\n",
    "def set_seed(seed_value=42):\n",
    "    random.seed(seed_value)  # Python内置的随机模块\n",
    "    np.random.seed(seed_value)  # NumPy的随机数生成器\n",
    "    '''torch.manual_seed(seed_value)  # PyTorch的随机数生成器\n",
    "    # 如果使用CUDA\n",
    "    torch.cuda.manual_seed_all(seed_value)  # 为所有CUDA设备设置随机种子\n",
    "    torch.backends.cudnn.deterministic = True  # 设置确定性卷积\n",
    "    torch.backends.cudnn.benchmark = False  # 为了确保确定性，关闭 cudnn 基准测试'''\n",
    "# 调用函数设置随机种子\n",
    "set_seed(42)\n",
    "\n",
    "gpu=6\n",
    "if torch.cuda.is_available():\n",
    "        torch.cuda.set_device(gpu)\n",
    "        device = 'cuda'\n",
    "else:\n",
    "        print('WARNING: [CUDA unavailable] Using CPU instead!')\n",
    "        device = 'cpu'\n",
    "#device='cpu'\n",
    "model_save_pth='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade'\n",
    "\n",
    "s=12.5\n",
    "m=0.5"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 初始化"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 201,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# 在函数外面加载数据集\n",
    "root_dir = '../data'\n",
    "train_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=True, download=True)\n",
    "test_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=False, download=True)\n",
    "\n",
    "# 随机化类别顺序\n",
    "class_order = list(range(100))\n",
    "random.shuffle(class_order)\n",
    "label_mapping = {original_label: new_label for new_label, original_label in enumerate(class_order)}\n",
    "# 定义每个任务的类别数量列表\n",
    "num_classes_per_task = [50, 5, 5, 5, 5, 5,5,5,5,5,5]\n",
    "\n",
    "batch_size = 256\n",
    "# 调用函数创建任务数据集和数据加载器\n",
    "# task_class_orders包含了随机属性\n",
    "origin_train_set, origin_test_set, train_loaders, test_loaders,task_class_orders = create_task_datasets_and_loaders(train_dataset, test_dataset, num_classes_per_task,class_order, batch_size=batch_size, transform_train=transform_train, transform_test=transform_test)\n",
    "\n",
    "# 假设你已经定义了origin_train_set和origin_test_set\n",
    "# 以及数据转换transform_train和transform_test\n",
    "n_samples_per_class = 20\n",
    "exampler_train_sets, exampler_test_sets, exampler_train_loaders, exampler_test_test_loaders = create_exampler_datasets_and_loaders(origin_train_set, origin_test_set, n_samples_per_class, batch_size, transform_train, transform_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "设置标签转换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 202,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 确定原始标签的范围（即最大的原始标签值）\n",
    "max_label = max(label_mapping.keys())\n",
    "\n",
    "# 创建一个长度等于原始标签范围的零数组\n",
    "label_mapping_array = np.zeros(max_label + 1, dtype=int)\n",
    "\n",
    "# 使用 label_mapping 字典填充这个数组\n",
    "for original_label, new_label in label_mapping.items():\n",
    "    label_mapping_array[original_label] = new_label\n",
    "\n",
    "# 将这个数组转换为 PyTorch 张量\n",
    "label_mapping_tensor = torch.from_numpy(label_mapping_array).to(device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Task 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 203,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'model = create_ArcMultiTaskModel(\\'resnet34\\',512,None)\\nmodel.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\\nmodel.add_incremental_task_layer(isInitial=True) \\n\\nfull_path=\\'/home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_0_0.744_20241224_054155.pth\\'\\n# 加载模型\\nif os.path.isfile(full_path):\\n    model.load_state_dict(torch.load(full_path,weights_only=True),strict=False)\\n    print(f\"模型已从 {full_path} 成功加载。\")\\nelse:\\n    print(f\"文件 {full_path} 不存在。\")'"
      ]
     },
     "execution_count": 203,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "\n",
    "full_path='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_0_0.744_20241224_054155.pth'\n",
    "# 加载模型\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True),strict=False)\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 204,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'tsk=0\\n\\n# 设置训练参数\\nnum_epochs=10\\nweight_decay=0.001\\nmax_lr=0.01\\n\\ncriterion=F.cross_entropy\\ncriterion_label=F.cross_entropy\\noptimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\\nscheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \\n                                                steps_per_epoch=len(train_loaders[tsk]),three_phase=True)\\n# 训练模型\\nmodel.to(device)\\nmodel=train_model(device, model, task_class_orders,{\\'train\\': train_loaders[tsk], \\'val\\': test_loaders[tsk]}, \\n            criterion, criterion_label,1,optimizer,scheduler,label_mapping=label_mapping, \\n            num_epochs=num_epochs,use_weighted_loss=False)\\n\\n# 在完整数据集上评估模型\\nloss, accuracy = evaluate_model(device,label_mapping,model, {\\'val\\': test_loaders[tsk]}, criterion)\\nprint(f\\'Loss on test set: {loss:.4f}\\')\\nprint(f\\'Accuracy on test set: {accuracy:.4f}\\')\\n\\n# 保存模型\\n# 格式化准确率，保留三位小数，并将其转换为字符串\\naccuracy_str = f\"{accuracy:.3f}\"\\ncurrent_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\\n# 使用准确率值作为文件名的一部分\\nfile_name = f\"cascade_tsk_{str(tsk)}_{accuracy_str}_{current_time}.pth\"\\n# 确保保存路径存在，如果不存在则创建\\nif not os.path.exists(model_save_pth):\\n    os.makedirs(model_save_pth, exist_ok=True)\\n# 保存模型到指定路径\\ntorch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'"
      ]
     },
     "execution_count": 204,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''tsk=0\n",
    "\n",
    "# 设置训练参数\n",
    "num_epochs=10\n",
    "weight_decay=0.001\n",
    "max_lr=0.01\n",
    "\n",
    "criterion=F.cross_entropy\n",
    "criterion_label=F.cross_entropy\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\n",
    "scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \n",
    "                                                steps_per_epoch=len(train_loaders[tsk]),three_phase=True)\n",
    "# 训练模型\n",
    "model.to(device)\n",
    "model=train_model(device, model, task_class_orders,{'train': train_loaders[tsk], 'val': test_loaders[tsk]}, \n",
    "            criterion, criterion_label,1,optimizer,scheduler,label_mapping=label_mapping, \n",
    "            num_epochs=num_epochs,use_weighted_loss=False)\n",
    "\n",
    "# 在完整数据集上评估模型\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[tsk]}, criterion)\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "# 保存模型\n",
    "# 格式化准确率，保留三位小数，并将其转换为字符串\n",
    "accuracy_str = f\"{accuracy:.3f}\"\n",
    "current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "# 使用准确率值作为文件名的一部分\n",
    "file_name = f\"cascade_tsk_{str(tsk)}_{accuracy_str}_{current_time}.pth\"\n",
    "# 确保保存路径存在，如果不存在则创建\n",
    "if not os.path.exists(model_save_pth):\n",
    "    os.makedirs(model_save_pth, exist_ok=True)\n",
    "# 保存模型到指定路径\n",
    "torch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Task 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 205,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'tsk=1\\n# 重新初始化模型并加载预训练参数\\nmodel = create_ArcMultiTaskModel(\\'resnet34\\',512,None)\\nmodel.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\\nmodel.add_incremental_task_layer(isInitial=True) \\n\\nfile_name=\\'cascade_tsk_0_0.754_20241224_084407.pth\\'\\nfull_path=os.path.join(model_save_pth,file_name)\\n# 加载模型\\nif os.path.isfile(full_path):\\n    model.load_state_dict(torch.load(full_path,weights_only=True))\\n    print(f\"模型已从 {full_path} 成功加载。\")\\nelse:\\n    print(f\"文件 {full_path} 不存在。\")\\n\\n# 添加当前任务对应层\\nmodel.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[1], s=s, m=m)\\nmodel.add_incremental_task_layer() '"
      ]
     },
     "execution_count": 205,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''tsk=1\n",
    "# 重新初始化模型并加载预训练参数\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "\n",
    "file_name='cascade_tsk_0_0.754_20241224_084407.pth'\n",
    "full_path=os.path.join(model_save_pth,file_name)\n",
    "# 加载模型\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True))\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")\n",
    "\n",
    "# 添加当前任务对应层\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[1], s=s, m=m)\n",
    "model.add_incremental_task_layer() '''\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 训练 Tsk 0 的 exampler 和 Task 1 的 数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 206,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"# 指定要冻结的层前缀\\nfreeze_prefixes = [\\n    'feature_extractor',\\n    'incremental_layers.0',\\n    'arc_classifiers.0.4',\\n\\n]\\nfreeze_layers(model, freeze_prefixes)\\n\\n# 使用ConcatDataset合并这两个数据集\\ncombined_train_set = ConcatDataset([exampler_train_sets[0], origin_train_set[1]])\\n# 创建一个新的DataLoader来加载合并后的数据集\\ncombined_train_loader = DataLoader(MyDataset(combined_train_set,transform=transform_train), \\n                                   batch_size=batch_size, shuffle=True, num_workers=4)\\n# 使用ConcatDataset合并这两个数据集\\ncombined_test_set = []\\nfor i in range(tsk+1):\\n    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\\n    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\\n    combined_test_set.append(origin_test_set[i])\\ncombined_test_set=ConcatDataset(combined_test_set)\\n# 创建一个新的DataLoader来加载合并后的数据集\\ncombined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \\n                                  batch_size=batch_size, shuffle=False, num_workers=4)\""
      ]
     },
     "execution_count": 206,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''# 指定要冻结的层前缀\n",
    "freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'incremental_layers.0',\n",
    "    'arc_classifiers.0.4',\n",
    "\n",
    "]\n",
    "freeze_layers(model, freeze_prefixes)\n",
    "\n",
    "# 使用ConcatDataset合并这两个数据集\n",
    "combined_train_set = ConcatDataset([exampler_train_sets[0], origin_train_set[1]])\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_train_loader = DataLoader(MyDataset(combined_train_set,transform=transform_train), \n",
    "                                   batch_size=batch_size, shuffle=True, num_workers=4)\n",
    "# 使用ConcatDataset合并这两个数据集\n",
    "combined_test_set = []\n",
    "for i in range(tsk+1):\n",
    "    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\n",
    "    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\n",
    "    combined_test_set.append(origin_test_set[i])\n",
    "combined_test_set=ConcatDataset(combined_test_set)\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \n",
    "                                  batch_size=batch_size, shuffle=False, num_workers=4)'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 207,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'num_epochs=30\\nweight_decay=0.01\\nmax_lr=0.01\\nweight_label=0.5\\n# 创建调度器和优化器\\noptimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\\nscheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \\n                                                steps_per_epoch=len(combined_train_loader),three_phase=False)\\nclass_stats = get_class_stats(combined_train_loader)\\n# 计算补齐后的 alpha\\nalpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\\nalpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\\nprint(\"alpha_cls:\", alpha_cls)\\nprint(\"alpha_tsk:\", alpha_tsk)\\nalpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \\n                alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\\n                w_cls=1, w_tsk=1)\\nalpha_tsk=alpha_cls.mean()/alpha_tsk.mean()*alpha_tsk\\nprint(\"alpha_cls:\", alpha_cls)\\nprint(\"alpha_tsk:\", alpha_tsk)\\ncriterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=0,reduction=\\'mean\\') \\n#criterion=None\\ncriterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=0,reduction=\\'mean\\') '"
      ]
     },
     "execution_count": 207,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''num_epochs=30\n",
    "weight_decay=0.01\n",
    "max_lr=0.01\n",
    "weight_label=0.5\n",
    "# 创建调度器和优化器\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\n",
    "scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \n",
    "                                                steps_per_epoch=len(combined_train_loader),three_phase=False)\n",
    "class_stats = get_class_stats(combined_train_loader)\n",
    "# 计算补齐后的 alpha\n",
    "alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "alpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \n",
    "                alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\n",
    "                w_cls=1, w_tsk=1)\n",
    "alpha_tsk=alpha_cls.mean()/alpha_tsk.mean()*alpha_tsk\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "criterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=0,reduction='mean') \n",
    "#criterion=None\n",
    "criterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=0,reduction='mean') '''\n",
    "#criterion_label=None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 208,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"# 训练模型\\nmodel.to(device)\\nmodel=train_model(device, model, \\n            task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \\n            criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \\n            num_epochs=num_epochs,use_weighted_loss=False)\""
      ]
     },
     "execution_count": 208,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''# 训练模型\n",
    "model.to(device)\n",
    "model=train_model(device, model, \n",
    "            task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \n",
    "            criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \n",
    "            num_epochs=num_epochs,use_weighted_loss=False)'''\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 209,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[0]})\\nprint(f'Loss on test set: {loss:.4f}')\\nprint(f'Accuracy on test set: {accuracy:.4f}')\\n\\nloss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[1]})\\nprint(f'Loss on test set: {loss:.4f}')\\nprint(f'Accuracy on test set: {accuracy:.4f}')\""
      ]
     },
     "execution_count": 209,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[0]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[1]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "注意，后面的m都是0\n",
    "\n",
    "以loss训练\n",
    "\n",
    "gamma 0，0\n",
    "Loss on test set: 1.7423\n",
    "Accuracy on test set: 0.6492\n",
    "Loss on test set: 0.9681\n",
    "Accuracy on test set: 0.7240\n",
    "\n",
    "Loss on test set: 1.6719\n",
    "Accuracy on test set: 0.6560\n",
    "\n",
    "gamma 1.5, 0\n",
    "Loss on test set: 1.6412\n",
    "Accuracy on test set: 0.6472\n",
    "Loss on test set: 1.0033\n",
    "Accuracy on test set: 0.6900\n",
    "\n",
    "Loss on test set: 1.5832\n",
    "Accuracy on test set: 0.6511\n",
    "\n",
    "gamma 1，1\n",
    " {0: 0.8079060684958943, 1: 0.8607562595809913}\n",
    " Loss on test set: 1.7054\n",
    "Accuracy on test set: 0.6378\n",
    "Loss on test set: 0.8537\n",
    "Accuracy on test set: 0.7240\n",
    "\n",
    "Loss on test set: 1.6280\n",
    "Accuracy on test set: 0.6456\n",
    "\n",
    "gamma 2 0\n",
    "Loss on test set: 1.5750\n",
    "Accuracy on test set: 0.6606\n",
    "Loss on test set: 1.1483\n",
    "Accuracy on test set: 0.6480\n",
    "\n",
    "Loss on test set: 1.5362\n",
    "Accuracy on test set: 0.6595\n",
    "\n",
    "w cls 1.2\n",
    "Loss on test set: 1.5992\n",
    "Accuracy on test set: 0.6546\n",
    "Loss on test set: 1.1136\n",
    "Accuracy on test set: 0.6420\n",
    "\n",
    "Loss on test set: 1.5550\n",
    "Accuracy on test set: 0.6535\n",
    "\n",
    "gamma 2 1\n",
    "Loss on test set: 1.6132\n",
    "Accuracy on test set: 0.6446\n",
    "Loss on test set: 0.9171\n",
    "Accuracy on test set: 0.7220\n",
    "\n",
    "Loss on test set: 1.5499\n",
    "Accuracy on test set: 0.6516\n",
    "\n",
    "\n",
    "以acc训练\n",
    "\n",
    "\n",
    "gamma 2，0\n",
    "Loss on test set: 1.6671\n",
    "Accuracy on test set: 0.6392\n",
    "Loss on test set: 0.8624\n",
    "Accuracy on test set: 0.7320\n",
    "\n",
    "Loss on test set: 1.5939\n",
    "Accuracy on test set: 0.6476"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 210,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"# 在完整数据集上评估模型\\nloss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader})\\nprint(f'Loss on test set: {loss:.4f}')\\nprint(f'Accuracy on test set: {accuracy:.4f}')\""
      ]
     },
     "execution_count": 210,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''# 在完整数据集上评估模型\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')'''\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 211,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'# 保存模型\\n# 格式化准确率，保留三位小数，并将其转换为字符串\\naccuracy_str = f\"{accuracy:.3f}\"\\ncurrent_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\\n# 使用准确率值作为文件名的一部分\\nfile_name = f\"cascade_tsk_{str(tsk)}_NT2_{accuracy_str}_{current_time}.pth\"\\n# 确保保存路径存在，如果不存在则创建\\nif not os.path.exists(model_save_pth):\\n    os.makedirs(model_save_pth, exist_ok=True)\\n# 保存模型到指定路径\\ntorch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'"
      ]
     },
     "execution_count": 211,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''# 保存模型\n",
    "# 格式化准确率，保留三位小数，并将其转换为字符串\n",
    "accuracy_str = f\"{accuracy:.3f}\"\n",
    "current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "# 使用准确率值作为文件名的一部分\n",
    "file_name = f\"cascade_tsk_{str(tsk)}_NT2_{accuracy_str}_{current_time}.pth\"\n",
    "# 确保保存路径存在，如果不存在则创建\n",
    "if not os.path.exists(model_save_pth):\n",
    "    os.makedirs(model_save_pth, exist_ok=True)\n",
    "# 保存模型到指定路径\n",
    "torch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 训练 Tsk 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 212,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 1\n",
      "Incremental Task Layer added, current len: 1\n",
      "Incremental ARC Classifier added, current len: 2\n",
      "Incremental Task Layer added, current len: 2\n",
      "模型已从 /home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_1_NT2_0.684_20241224_135328.pth 成功加载。\n",
      "Incremental ARC Classifier added, current len: 3\n",
      "Incremental Task Layer added, current len: 3\n"
     ]
    }
   ],
   "source": [
    "tsk=2\n",
    "# 重新初始化模型并加载预训练参数\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "# 添加当前任务对应层\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[1], s=s, m=m)\n",
    "model.add_incremental_task_layer() \n",
    "\n",
    "#full_path='/home/ubuntu/workspace/incremental_learning/save_models/cascade/cascade_tsk_1_P1_0.701_20241219_032040.pth'\n",
    "full_path='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_1_NT2_0.684_20241224_135328.pth'\n",
    "\n",
    "# 加载模型\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True))\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[2], s=s, m=m)\n",
    "model.add_incremental_task_layer() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 213,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "----Freezing incremental_layers.1.gate_generator.0.weight\n",
      "----Freezing incremental_layers.1.gate_generator.0.bias\n",
      "----Freezing incremental_layers.1.gate_generator.1.weight\n",
      "----Freezing incremental_layers.1.gate_generator.1.bias\n",
      "----Freezing incremental_layers.1.gate_generator.5.weight\n",
      "----Freezing incremental_layers.1.gate_generator.5.bias\n",
      "incremental_layers.2.layer.0.weight\n",
      "incremental_layers.2.layer.0.bias\n",
      "incremental_layers.2.layer.1.weight\n",
      "incremental_layers.2.layer.1.bias\n",
      "incremental_layers.2.layer.3.weight\n",
      "incremental_layers.2.layer.3.bias\n",
      "incremental_layers.2.layer.4.weight\n",
      "incremental_layers.2.layer.4.bias\n",
      "incremental_layers.2.layer.6.weight\n",
      "incremental_layers.2.layer.6.bias\n",
      "incremental_layers.2.layer.7.weight\n",
      "incremental_layers.2.layer.7.bias\n",
      "incremental_layers.2.gate_generator.0.weight\n",
      "incremental_layers.2.gate_generator.0.bias\n",
      "incremental_layers.2.gate_generator.1.weight\n",
      "incremental_layers.2.gate_generator.1.bias\n",
      "incremental_layers.2.gate_generator.5.weight\n",
      "incremental_layers.2.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "arc_classifiers.2.4.weight\n"
     ]
    }
   ],
   "source": [
    "# Freeze the shared feature extractor plus the incremental layers and ArcFace\n",
    "# classifier weights of the earlier tasks (indices 0 and 1), so only the\n",
    "# current task's new parameters are trained in this phase.\n",
    "freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "\n",
    "    'incremental_layers.0',\n",
    "    'incremental_layers.1',\n",
    "    'arc_classifiers.0.4',\n",
    "    'arc_classifiers.1.4',\n",
    "]\n",
    "\n",
    "freeze_layers(model, freeze_prefixes)\n",
    "\n",
    "# Training set = exemplar sets of all previous tasks + full training set of\n",
    "# the current task `tsk`.\n",
    "combined_train_set = []\n",
    "for i in range(tsk):\n",
    "    # Append each previous task's exemplar set as a whole; adapt this code if\n",
    "    # you need to pick specific elements by index instead.\n",
    "    combined_train_set.append(exampler_train_sets[i])\n",
    "combined_train_set.append(origin_train_set[tsk])\n",
    "combined_train_set=ConcatDataset(combined_train_set)\n",
    "# Create a new DataLoader for the merged training set\n",
    "combined_train_loader = DataLoader(MyDataset(combined_train_set,transform=transform_train), \n",
    "                                batch_size=batch_size, shuffle=True, num_workers=4)\n",
    "\n",
    "\n",
    "# Merge the full test sets of every task seen so far with ConcatDataset\n",
    "combined_test_set = []\n",
    "for i in range(tsk+1):\n",
    "    # Append each task's full test set as a whole; adapt this code if you\n",
    "    # need to pick specific elements by index instead.\n",
    "    combined_test_set.append(origin_test_set[i])\n",
    "combined_test_set=ConcatDataset(combined_test_set)\n",
    "# Create a new DataLoader for the merged test set (no shuffling for eval)\n",
    "combined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \n",
    "                                batch_size=batch_size, shuffle=False, num_workers=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 214,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 3.59998704 35.99870405  1.43999793]\n",
      "num_prev_task_classes: 55\n",
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 43.57236687 435.7095518   17.4289844 ]\n"
     ]
    }
   ],
   "source": [
    "# Hyperparameters for this training phase\n",
    "num_epochs=60\n",
    "weight_decay=0.001\n",
    "max_lr=0.001\n",
    "weight_label=0.5\n",
    "# Create the optimizer and the one-cycle LR scheduler\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\n",
    "scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \n",
    "                                                steps_per_epoch=len(combined_train_loader),three_phase=True)\n",
    "class_stats = get_class_stats(combined_train_loader)\n",
    "# Compute the padded/ordered alpha weights from the class statistics\n",
    "alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "# NOTE(review): with w_cls=1 and w_tsk=1 this multiplies the alphas by 1, i.e.\n",
    "# it leaves them unchanged — the call mainly prints num_prev_task_classes.\n",
    "alpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \n",
    "                alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\n",
    "                w_cls=1, w_tsk=1)\n",
    "# Rescale the task-level alphas so their mean matches the class-level alphas\n",
    "alpha_tsk=alpha_cls.mean()/alpha_tsk.mean()*alpha_tsk\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "# gamma=0 reduces the focal loss to an alpha-weighted cross-entropy\n",
    "criterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=0,reduction='mean') \n",
    "criterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=0,reduction='mean') \n",
    "#criterion_label = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 215,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e476bb8601514c51b65137f4e3efd7c4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/900 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/60 - Train Loss: 74.9028\n",
      "Per Task Accuracy: {0: 0.43305900227905797, 1: 0.4454423367466846, 2: 0.7925113084268721}\n",
      "Validation Loss: 295.8974 - Validation Accuracy: 0.6013\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 69.0332\n",
      "Per Task Accuracy: {0: 0.3947201823246391, 1: 0.5090649655867047, 2: 0.841011894789747}\n",
      "Validation Loss: 296.3434 - Validation Accuracy: 0.5983\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 61.3428\n",
      "Per Task Accuracy: {0: 0.4010129146619397, 1: 0.6095769682726204, 2: 0.8065421343608644}\n",
      "Validation Loss: 290.7572 - Validation Accuracy: 0.6028\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 53.2420\n",
      "Per Task Accuracy: {0: 0.42452519625221574, 1: 0.5847322477757261, 2: 0.8119869324845033}\n",
      "Validation Loss: 291.3074 - Validation Accuracy: 0.6027\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 52.1470\n",
      "Per Task Accuracy: {0: 0.42645606482653836, 1: 0.6228386771865033, 2: 0.8067515496733121}\n",
      "Validation Loss: 288.0121 - Validation Accuracy: 0.6057\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 47.5700\n",
      "Per Task Accuracy: {0: 0.4350848316029374, 1: 0.6465922444183314, 2: 0.7818311274920422}\n",
      "Validation Loss: 283.7852 - Validation Accuracy: 0.6107\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 44.5616\n",
      "Per Task Accuracy: {0: 0.437218283109648, 1: 0.658552962900789, 2: 0.7806584017423354}\n",
      "Validation Loss: 283.8869 - Validation Accuracy: 0.6125\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 48.2832\n",
      "Per Task Accuracy: {0: 0.44228285641934667, 1: 0.6902383750209837, 2: 0.7735801641816049}\n",
      "Validation Loss: 283.5869 - Validation Accuracy: 0.6153\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 41.7164\n",
      "Per Task Accuracy: {0: 0.4382185363383135, 1: 0.655867047171395, 2: 0.7825850226168537}\n",
      "Validation Loss: 280.0624 - Validation Accuracy: 0.6217\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 37.8419\n",
      "Per Task Accuracy: {0: 0.4339642947581666, 1: 0.6691707235185496, 2: 0.7813285307421679}\n",
      "Validation Loss: 280.8992 - Validation Accuracy: 0.6287\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 38.0986\n",
      "Per Task Accuracy: {0: 0.4547480374778425, 1: 0.6479352022830284, 2: 0.7695175071201206}\n",
      "Validation Loss: 278.5038 - Validation Accuracy: 0.6332\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 38.4134\n",
      "Per Task Accuracy: {0: 0.46378197011901745, 1: 0.6097028705724358, 2: 0.7719467247445133}\n",
      "Validation Loss: 276.4070 - Validation Accuracy: 0.6348\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 33.7827\n",
      "Per Task Accuracy: {0: 0.4808115978728792, 1: 0.6904062447540709, 2: 0.7363461216284135}\n",
      "Validation Loss: 273.9608 - Validation Accuracy: 0.6395\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 36.1659\n",
      "Per Task Accuracy: {0: 0.4702646239554318, 1: 0.6677018633540373, 2: 0.7406181940023454}\n",
      "Validation Loss: 276.7356 - Validation Accuracy: 0.6365\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 37.3544\n",
      "Per Task Accuracy: {0: 0.47200557103064067, 1: 0.7272116837334228, 2: 0.730691908192327}\n",
      "Validation Loss: 280.1047 - Validation Accuracy: 0.6357\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 35.3386\n",
      "Per Task Accuracy: {0: 0.4809065586224361, 1: 0.7113899613899614, 2: 0.7326185290668453}\n",
      "Validation Loss: 278.8874 - Validation Accuracy: 0.6418\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 32.7212\n",
      "Per Task Accuracy: {0: 0.46571283869334007, 1: 0.6399194225281182, 2: 0.7509633104372592}\n",
      "Validation Loss: 283.4213 - Validation Accuracy: 0.6378\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 31.6913\n",
      "Per Task Accuracy: {0: 0.45464041529501137, 1: 0.6735353365788148, 2: 0.7524711006868823}\n",
      "Validation Loss: 284.5949 - Validation Accuracy: 0.6370\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 33.0508\n",
      "Per Task Accuracy: {0: 0.45282983033679414, 1: 0.6743746852442505, 2: 0.7506701289998324}\n",
      "Validation Loss: 283.3429 - Validation Accuracy: 0.6405\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 33.6547\n",
      "Per Task Accuracy: {0: 0.48325525449480883, 1: 0.7116417659895921, 2: 0.6883900150779025}\n",
      "Validation Loss: 280.9294 - Validation Accuracy: 0.6432\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 33.0916\n",
      "Per Task Accuracy: {0: 0.4698721195239301, 1: 0.6666946449555146, 2: 0.7494974032501257}\n",
      "Validation Loss: 283.6168 - Validation Accuracy: 0.6455\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 30.4074\n",
      "Per Task Accuracy: {0: 0.46448467966573814, 1: 0.6464663421185161, 2: 0.7438013067515497}\n",
      "Validation Loss: 283.5205 - Validation Accuracy: 0.6445\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 29.5467\n",
      "Per Task Accuracy: {0: 0.4537351228159028, 1: 0.6998908846734934, 2: 0.7690986764952253}\n",
      "Validation Loss: 284.5848 - Validation Accuracy: 0.6442\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 30.0924\n",
      "Per Task Accuracy: {0: 0.465092428462902, 1: 0.7061859996642605, 2: 0.7511308426872173}\n",
      "Validation Loss: 283.2488 - Validation Accuracy: 0.6487\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 35.2557\n",
      "Per Task Accuracy: {0: 0.4726956191440871, 1: 0.7009820379385597, 2: 0.7427542301893114}\n",
      "Validation Loss: 286.0398 - Validation Accuracy: 0.6437\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 30.4618\n",
      "Per Task Accuracy: {0: 0.46823879463155227, 1: 0.6945190532147054, 2: 0.7217708158820573}\n",
      "Validation Loss: 283.3359 - Validation Accuracy: 0.6467\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 34.0335\n",
      "Per Task Accuracy: {0: 0.478285641934667, 1: 0.680795702534833, 2: 0.7317389847545652}\n",
      "Validation Loss: 284.6121 - Validation Accuracy: 0.6460\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 30.5602\n",
      "Per Task Accuracy: {0: 0.47219549252975435, 1: 0.7180627832801746, 2: 0.727131847880717}\n",
      "Validation Loss: 283.4149 - Validation Accuracy: 0.6448\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 29.0304\n",
      "Per Task Accuracy: {0: 0.4998670549506204, 1: 0.7060600973644452, 2: 0.7147344613838164}\n",
      "Validation Loss: 286.0713 - Validation Accuracy: 0.6462\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 28.1433\n",
      "Per Task Accuracy: {0: 0.4753418586984047, 1: 0.6729058250797382, 2: 0.7168286145082928}\n",
      "Validation Loss: 285.5661 - Validation Accuracy: 0.6458\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 29.7002\n",
      "Per Task Accuracy: {0: 0.46085084831602935, 1: 0.7047171394997482, 2: 0.7479058468755235}\n",
      "Validation Loss: 285.4598 - Validation Accuracy: 0.6422\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 28.0287\n",
      "Per Task Accuracy: {0: 0.4804064320081033, 1: 0.715670639583683, 2: 0.7278857430055286}\n",
      "Validation Loss: 287.1698 - Validation Accuracy: 0.6455\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 27.5830\n",
      "Per Task Accuracy: {0: 0.48416054697391747, 1: 0.6477253651166694, 2: 0.7387753392528061}\n",
      "Validation Loss: 287.5071 - Validation Accuracy: 0.6490\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 28.0305\n",
      "Per Task Accuracy: {0: 0.4775702709546721, 1: 0.7129427564210173, 2: 0.7439688390015078}\n",
      "Validation Loss: 286.8754 - Validation Accuracy: 0.6470\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 27.1319\n",
      "Per Task Accuracy: {0: 0.4863889592301849, 1: 0.726036595601813, 2: 0.7198860780700285}\n",
      "Validation Loss: 286.3009 - Validation Accuracy: 0.6498\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 26.6235\n",
      "Per Task Accuracy: {0: 0.4761521904279564, 1: 0.6990095685747859, 2: 0.7358016418160496}\n",
      "Validation Loss: 289.9940 - Validation Accuracy: 0.6480\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 27.9980\n",
      "Per Task Accuracy: {0: 0.49388452772853886, 1: 0.697079066644284, 2: 0.7385659239403585}\n",
      "Validation Loss: 290.7445 - Validation Accuracy: 0.6480\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 29.3494\n",
      "Per Task Accuracy: {0: 0.522176500379843, 1: 0.6995971126405909, 2: 0.6837409951415647}\n",
      "Validation Loss: 291.8917 - Validation Accuracy: 0.6458\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 31.3191\n",
      "Per Task Accuracy: {0: 0.5094960749556849, 1: 0.8296961557831123, 2: 0.6093985592226504}\n",
      "Validation Loss: 288.8178 - Validation Accuracy: 0.6498\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 27.4609\n",
      "Per Task Accuracy: {0: 0.4667257533552798, 1: 0.6611549437636394, 2: 0.7480733791254817}\n",
      "Validation Loss: 291.9309 - Validation Accuracy: 0.6473\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n",
      "Epoch 41/60 - Train Loss: 27.7192\n",
      "Per Task Accuracy: {0: 0.4624588503418587, 1: 0.6892311566224609, 2: 0.7518428547495393}\n",
      "Validation Loss: 291.6599 - Validation Accuracy: 0.6468\n",
      "---------------\n",
      "Epoch 42/60\n",
      "---------------\n",
      "Epoch 42/60 - Train Loss: 31.8526\n",
      "Per Task Accuracy: {0: 0.4709926563687009, 1: 0.7048010743662918, 2: 0.7382727425029318}\n",
      "Validation Loss: 290.6847 - Validation Accuracy: 0.6470\n",
      "---------------\n",
      "Epoch 43/60\n",
      "---------------\n",
      "Epoch 43/60 - Train Loss: 29.4624\n",
      "Per Task Accuracy: {0: 0.47838060268422383, 1: 0.7483632701024006, 2: 0.6945049422013737}\n",
      "Validation Loss: 284.1474 - Validation Accuracy: 0.6478\n",
      "---------------\n",
      "Epoch 44/60\n",
      "---------------\n",
      "Epoch 44/60 - Train Loss: 29.7479\n",
      "Per Task Accuracy: {0: 0.4959230184856926, 1: 0.6941413463152594, 2: 0.7231948400067013}\n",
      "Validation Loss: 284.9843 - Validation Accuracy: 0.6513\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 45/60\n",
      "---------------\n",
      "Epoch 45/60 - Train Loss: 27.7178\n",
      "Per Task Accuracy: {0: 0.49428969359331476, 1: 0.7131945610206479, 2: 0.7180013402579997}\n",
      "Validation Loss: 286.3338 - Validation Accuracy: 0.6527\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 46/60\n",
      "---------------\n",
      "Epoch 46/60 - Train Loss: 27.5949\n",
      "Per Task Accuracy: {0: 0.4780703975690048, 1: 0.7777824408259191, 2: 0.674819902831295}\n",
      "Validation Loss: 290.7813 - Validation Accuracy: 0.6470\n",
      "---------------\n",
      "Epoch 47/60\n",
      "---------------\n",
      "Epoch 47/60 - Train Loss: 27.2550\n",
      "Per Task Accuracy: {0: 0.47158774373259055, 1: 0.6863354037267081, 2: 0.7528899313117775}\n",
      "Validation Loss: 289.7969 - Validation Accuracy: 0.6488\n",
      "---------------\n",
      "Epoch 48/60\n",
      "---------------\n",
      "Epoch 48/60 - Train Loss: 27.5924\n",
      "Per Task Accuracy: {0: 0.48212205621676374, 1: 0.6809216048346484, 2: 0.7327022951918244}\n",
      "Validation Loss: 291.7187 - Validation Accuracy: 0.6467\n",
      "---------------\n",
      "Epoch 49/60\n",
      "---------------\n",
      "Epoch 49/60 - Train Loss: 25.8110\n",
      "Per Task Accuracy: {0: 0.4798113446442137, 1: 0.7325415477589391, 2: 0.7321996984419501}\n",
      "Validation Loss: 294.0589 - Validation Accuracy: 0.6432\n",
      "---------------\n",
      "Epoch 50/60\n",
      "---------------\n",
      "Epoch 50/60 - Train Loss: 30.8447\n",
      "Per Task Accuracy: {0: 0.5063497087870347, 1: 0.7196155783112305, 2: 0.6944630591388843}\n",
      "Validation Loss: 291.4422 - Validation Accuracy: 0.6503\n",
      "---------------\n",
      "Epoch 51/60\n",
      "---------------\n",
      "Epoch 51/60 - Train Loss: 31.4333\n",
      "Per Task Accuracy: {0: 0.4904406178779438, 1: 0.7390045324827933, 2: 0.6669877701457531}\n",
      "Validation Loss: 288.2761 - Validation Accuracy: 0.6488\n",
      "---------------\n",
      "Epoch 52/60\n",
      "---------------\n",
      "Epoch 52/60 - Train Loss: 26.6977\n",
      "Per Task Accuracy: {0: 0.49612560141808054, 1: 0.6798724190028538, 2: 0.7093734293851567}\n",
      "Validation Loss: 292.4208 - Validation Accuracy: 0.6497\n",
      "---------------\n",
      "Epoch 53/60\n",
      "---------------\n",
      "Epoch 53/60 - Train Loss: 26.8376\n",
      "Per Task Accuracy: {0: 0.4916687769055457, 1: 0.6837334228638576, 2: 0.7347545652538113}\n",
      "Validation Loss: 294.0106 - Validation Accuracy: 0.6475\n",
      "---------------\n",
      "Epoch 54/60\n",
      "---------------\n",
      "Epoch 54/60 - Train Loss: 26.0520\n",
      "Per Task Accuracy: {0: 0.4786908077994429, 1: 0.7126909518213866, 2: 0.7225247110068688}\n",
      "Validation Loss: 295.0256 - Validation Accuracy: 0.6457\n",
      "---------------\n",
      "Epoch 55/60\n",
      "---------------\n",
      "Epoch 55/60 - Train Loss: 25.7356\n",
      "Per Task Accuracy: {0: 0.4948024816409218, 1: 0.7212103407755581, 2: 0.7051851231362037}\n",
      "Validation Loss: 292.6107 - Validation Accuracy: 0.6505\n",
      "---------------\n",
      "Epoch 56/60\n",
      "---------------\n",
      "Epoch 56/60 - Train Loss: 25.8454\n",
      "Per Task Accuracy: {0: 0.4911559888579387, 1: 0.7086201107940239, 2: 0.7187133523203216}\n",
      "Validation Loss: 293.9631 - Validation Accuracy: 0.6477\n",
      "---------------\n",
      "Epoch 57/60\n",
      "---------------\n",
      "Epoch 57/60 - Train Loss: 25.4375\n",
      "Per Task Accuracy: {0: 0.4985692580400101, 1: 0.7236444519053215, 2: 0.7075305746356173}\n",
      "Validation Loss: 294.2659 - Validation Accuracy: 0.6472\n",
      "---------------\n",
      "Epoch 58/60\n",
      "---------------\n",
      "Epoch 58/60 - Train Loss: 26.3243\n",
      "Per Task Accuracy: {0: 0.4912636110407698, 1: 0.719699513177774, 2: 0.7209750376947562}\n",
      "Validation Loss: 294.8901 - Validation Accuracy: 0.6480\n",
      "---------------\n",
      "Epoch 59/60\n",
      "---------------\n",
      "Epoch 59/60 - Train Loss: 27.2398\n",
      "Per Task Accuracy: {0: 0.48182451253481895, 1: 0.6915813328856807, 2: 0.7276344446305913}\n",
      "Validation Loss: 293.2657 - Validation Accuracy: 0.6523\n",
      "---------------\n",
      "Epoch 60/60\n",
      "---------------\n",
      "Epoch 60/60 - Train Loss: 25.6019\n",
      "Per Task Accuracy: {0: 0.4863762977969106, 1: 0.7370740305522914, 2: 0.7217708158820573}\n",
      "Validation Loss: 294.1977 - Validation Accuracy: 0.6487\n"
     ]
    }
   ],
   "source": [
    "# Train the model on the combined loaders; train_model returns the model with\n",
    "# the best-validation-accuracy weights seen during training.\n",
    "model.to(device)\n",
    "model=train_model(device, model, \n",
    "            task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \n",
    "            criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \n",
    "            num_epochs=num_epochs,use_weighted_loss=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 216,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loss on test set: 1.8652\n",
      "Accuracy on test set: 0.6516\n",
      "Loss on test set: 1.3721\n",
      "Accuracy on test set: 0.6040\n",
      "Loss on test set: 1.2203\n",
      "Accuracy on test set: 0.6640\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the model on each task's test loader separately (tasks 0..2).\n",
    "# The loop replaces three copy-pasted evaluation blocks; the printed lines\n",
    "# are identical to the original cell's output.\n",
    "for task_loader in test_loaders[:3]:\n",
    "    loss, accuracy = evaluate_model(device,label_mapping,model, {'val': task_loader})\n",
    "    print(f'Loss on test set: {loss:.4f}')\n",
    "    print(f'Accuracy on test set: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 218,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loss on test set: 1.7703\n",
      "Accuracy on test set: 0.6487\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the model on the full combined test set (all tasks seen so far)\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 219,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the model checkpoint, tagging the file name with the combined-test\n",
    "# accuracy and a timestamp.\n",
    "# Format the accuracy with three decimals for use in the file name\n",
    "accuracy_str = f\"{accuracy:.3f}\"\n",
    "current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "file_name = f\"cascade_tsk_{str(tsk)}_NT2_{accuracy_str}_{current_time}.pth\"\n",
    "# makedirs with exist_ok=True is race-free on its own; the previous explicit\n",
    "# os.path.exists() guard was redundant (and a TOCTOU pattern) and was removed.\n",
    "os.makedirs(model_save_pth, exist_ok=True)\n",
    "# Save the model weights to the target path\n",
    "torch.save(model.state_dict(), os.path.join(model_save_pth, file_name))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 217,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'combined_tset_set = []\\nfor i in range(tsk):\\n    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\\n    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\\n    combined_tset_set.append(origin_test_set[i])\\n# 创建一个新的DataLoader来加载合并后的数据集\\ncombined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \\n                                  batch_size=batch_size, shuffle=False, num_workers=4)\\n\\n# 在完整数据集上评估模型\\nloss, accuracy = evaluate_model(device,label_mapping,model, {\\'val\\': combined_test_loader}, criterion)\\nprint(f\\'Loss on test set: {loss:.4f}\\')\\nprint(f\\'Accuracy on test set: {accuracy:.4f}\\')\\n\\n# 保存模型\\n# 格式化准确率，保留三位小数，并将其转换为字符串\\naccuracy_str = f\"{accuracy:.3f}\"\\ncurrent_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\\n# 使用准确率值作为文件名的一部分\\nfile_name = f\"cascade_tsk_{str(tsk)}_P2_{accuracy_str}_{current_time}.pth\"\\n# 确保保存路径存在，如果不存在则创建\\nif not os.path.exists(model_save_pth):\\n    os.makedirs(model_save_pth, exist_ok=True)\\n# 保存模型到指定路径\\ntorch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'"
      ]
     },
     "execution_count": 217,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): dead code kept inside a triple-quoted string (its repr is the\n",
    "# cell output); the string is left byte-identical — consider deleting this cell.\n",
    "'''combined_tset_set = []\n",
    "for i in range(tsk):\n",
    "    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\n",
    "    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\n",
    "    combined_tset_set.append(origin_test_set[i])\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \n",
    "                                  batch_size=batch_size, shuffle=False, num_workers=4)\n",
    "\n",
    "# 在完整数据集上评估模型\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader}, criterion)\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "# 保存模型\n",
    "# 格式化准确率，保留三位小数，并将其转换为字符串\n",
    "accuracy_str = f\"{accuracy:.3f}\"\n",
    "current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "# 使用准确率值作为文件名的一部分\n",
    "file_name = f\"cascade_tsk_{str(tsk)}_P2_{accuracy_str}_{current_time}.pth\"\n",
    "# 确保保存路径存在，如果不存在则创建\n",
    "if not os.path.exists(model_save_pth):\n",
    "    os.makedirs(model_save_pth, exist_ok=True)\n",
    "# 保存模型到指定路径\n",
    "torch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Task 2"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "dl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
