{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import ConcatDataset,DataLoader\n",
    "from torch import nn, optim\n",
    "import torch\n",
    "import datetime\n",
    "from tqdm.notebook import tqdm\n",
    "import os\n",
    "#from cascade_lib import *\n",
    "from cascade_libary.network import create_ArcMultiTaskModel\n",
    "from cascade_libary.dataset import create_exampler_datasets_and_loaders, create_task_datasets_and_loaders,MyDataset,transform_test,transform_train\n",
    "from cascade_libary.train_eval import train_model, evaluate_model\n",
    "from cascade_libary.criterian import get_class_stats,sort_and_complete_alpha_by_label_mapping,MultiClassFocalLossWithAlpha\n",
    "from cascade_libary.utils import freeze_layers\n",
    "from torchvision import datasets as torchvision_datasets\n",
    "import numpy as np\n",
    "import itertools\n",
    "from kornia.losses import FocalLoss\n",
    "import math\n",
    "import torch.nn.functional as F\n",
    "import random\n",
    "\n",
    "#from kornia.losses import FocalLoss\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def reweight_alphas(task_class_orders, tsk,alpha_cls, alpha_tsk,w_cls, w_tsk):\n",
    "    \"\"\"\n",
    "    更新alpha_cls和alpha_tsk的函数。\n",
    "    \n",
    "    参数:\n",
    "    task_class_orders -- 任务中类别的顺序列表。\n",
    "    tsk -- 我们关注的是第tsk个任务（索引从1开始）。\n",
    "    w_cls -- alpha_cls需要乘以的权重。\n",
    "    w_tsk -- alpha_tsk需要乘以的权重。\n",
    "    \n",
    "    返回:\n",
    "    更新后的alpha_cls和alpha_tsk列表。\n",
    "    \"\"\"\n",
    "    # 计算第tsk个任务之前的所有任务的类别总数\n",
    "\n",
    "    num_prev_task_classes = sum(len(task) for task in task_class_orders[:tsk])\n",
    "    print(\"num_prev_task_classes:\", num_prev_task_classes)\n",
    "    # 更新alpha_cls\n",
    "    for i in range(num_prev_task_classes):\n",
    "        alpha_cls[i] *= w_cls\n",
    "    \n",
    "    # 更新alpha_tsk\n",
    "    for i in range(tsk):\n",
    "        alpha_tsk[i] *= w_tsk\n",
    "    \n",
    "    return alpha_cls, alpha_tsk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置随机种子\n",
    "def set_seed(seed_value=42):\n",
    "    random.seed(seed_value)  # Python内置的随机模块\n",
    "    np.random.seed(seed_value)  # NumPy的随机数生成器\n",
    "    '''torch.manual_seed(seed_value)  # PyTorch的随机数生成器\n",
    "    # 如果使用CUDA\n",
    "    torch.cuda.manual_seed_all(seed_value)  # 为所有CUDA设备设置随机种子\n",
    "    torch.backends.cudnn.deterministic = True  # 设置确定性卷积\n",
    "    torch.backends.cudnn.benchmark = False  # 为了确保确定性，关闭 cudnn 基准测试'''\n",
    "# 调用函数设置随机种子\n",
    "set_seed(42)\n",
    "\n",
    "gpu=6\n",
    "if torch.cuda.is_available():\n",
    "        torch.cuda.set_device(gpu)\n",
    "        device = 'cuda'\n",
    "else:\n",
    "        print('WARNING: [CUDA unavailable] Using CPU instead!')\n",
    "        device = 'cpu'\n",
    "#device='cpu'\n",
    "model_save_pth='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade'\n",
    "\n",
    "s=12.5\n",
    "m=0.5"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 初始化"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# 在函数外面加载数据集\n",
    "root_dir = '../data'\n",
    "train_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=True, download=True)\n",
    "test_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=False, download=True)\n",
    "\n",
    "# 随机化类别顺序\n",
    "class_order = list(range(100))\n",
    "random.shuffle(class_order)\n",
    "label_mapping = {original_label: new_label for new_label, original_label in enumerate(class_order)}\n",
    "# 定义每个任务的类别数量列表\n",
    "num_classes_per_task = [50, 5, 5, 5, 5, 5,5,5,5,5,5]\n",
    "\n",
    "batch_size = 64\n",
    "# 调用函数创建任务数据集和数据加载器\n",
    "# task_class_orders包含了随机属性\n",
    "origin_train_set, origin_test_set, train_loaders, test_loaders,task_class_orders = create_task_datasets_and_loaders(train_dataset, test_dataset, num_classes_per_task,class_order, batch_size=batch_size, transform_train=transform_train, transform_test=transform_test)\n",
    "\n",
    "# 假设你已经定义了origin_train_set和origin_test_set\n",
    "# 以及数据转换transform_train和transform_test\n",
    "n_samples_per_class = 20\n",
    "exampler_train_sets, exampler_test_sets, exampler_train_loaders, exampler_test_test_loaders = create_exampler_datasets_and_loaders(origin_train_set, origin_test_set, n_samples_per_class, batch_size, transform_train, transform_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "设置标签转换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 确定原始标签的范围（即最大的原始标签值）\n",
    "max_label = max(label_mapping.keys())\n",
    "\n",
    "# 创建一个长度等于原始标签范围的零数组\n",
    "label_mapping_array = np.zeros(max_label + 1, dtype=int)\n",
    "\n",
    "# 使用 label_mapping 字典填充这个数组\n",
    "for original_label, new_label in label_mapping.items():\n",
    "    label_mapping_array[original_label] = new_label\n",
    "\n",
    "# 将这个数组转换为 PyTorch 张量\n",
    "label_mapping_tensor = torch.from_numpy(label_mapping_array).to(device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = create_ArcMultiTaskModel('resnet34',512,None)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Task 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\\nmodel.add_incremental_task_layer(isInitial=True) '"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) '''\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'tsk=0\\n\\n# 设置训练参数\\nnum_epochs=150\\nweight_decay=0.001\\nmax_lr=0.01\\n\\ncriterion=F.cross_entropy\\ncriterion_label=F.cross_entropy\\noptimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\\nscheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \\n                                                steps_per_epoch=len(train_loaders[tsk]),three_phase=True)\\n# 训练模型\\nmodel.to(device)\\nmodel=train_model(device, model, task_class_orders,{\\'train\\': train_loaders[tsk], \\'val\\': test_loaders[tsk]}, \\n            criterion, criterion_label,1,optimizer,scheduler,label_mapping=label_mapping, \\n            num_epochs=num_epochs,use_weighted_loss=False)\\n\\n# 在完整数据集上评估模型\\nloss, accuracy = evaluate_model(device,label_mapping,model, {\\'val\\': test_loaders[tsk]}, criterion)\\nprint(f\\'Loss on test set: {loss:.4f}\\')\\nprint(f\\'Accuracy on test set: {accuracy:.4f}\\')\\n\\n# 保存模型\\n# 格式化准确率，保留三位小数，并将其转换为字符串\\naccuracy_str = f\"{accuracy:.3f}\"\\ncurrent_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\\n# 使用准确率值作为文件名的一部分\\nfile_name = f\"cascade_tsk_{str(tsk)}_{accuracy_str}_{current_time}.pth\"\\n# 确保保存路径存在，如果不存在则创建\\nif not os.path.exists(model_save_pth):\\n    os.makedirs(model_save_pth, exist_ok=True)\\n# 保存模型到指定路径\\ntorch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''tsk=0\n",
    "\n",
    "# 设置训练参数\n",
    "num_epochs=150\n",
    "weight_decay=0.001\n",
    "max_lr=0.01\n",
    "\n",
    "criterion=F.cross_entropy\n",
    "criterion_label=F.cross_entropy\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\n",
    "scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \n",
    "                                                steps_per_epoch=len(train_loaders[tsk]),three_phase=True)\n",
    "# 训练模型\n",
    "model.to(device)\n",
    "model=train_model(device, model, task_class_orders,{'train': train_loaders[tsk], 'val': test_loaders[tsk]}, \n",
    "            criterion, criterion_label,1,optimizer,scheduler,label_mapping=label_mapping, \n",
    "            num_epochs=num_epochs,use_weighted_loss=False)\n",
    "\n",
    "# 在完整数据集上评估模型\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[tsk]}, criterion)\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "# 保存模型\n",
    "# 格式化准确率，保留三位小数，并将其转换为字符串\n",
    "accuracy_str = f\"{accuracy:.3f}\"\n",
    "current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "# 使用准确率值作为文件名的一部分\n",
    "file_name = f\"cascade_tsk_{str(tsk)}_{accuracy_str}_{current_time}.pth\"\n",
    "# 确保保存路径存在，如果不存在则创建\n",
    "if not os.path.exists(model_save_pth):\n",
    "    os.makedirs(model_save_pth, exist_ok=True)\n",
    "# 保存模型到指定路径\n",
    "torch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Task 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'tsk=1\\n# 重新初始化模型并加载预训练参数\\nmodel = create_ArcMultiTaskModel(\\'resnet34\\',512,None)\\nmodel.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\\nmodel.add_incremental_task_layer(isInitial=True) \\n\\nfile_name=\\'cascade_tsk_0_0.734_20241219_122057.pth\\'\\nfull_path=os.path.join(model_save_pth,file_name)\\n# 加载模型\\nif os.path.isfile(full_path):\\n    model.load_state_dict(torch.load(full_path,weights_only=True))\\n    print(f\"模型已从 {full_path} 成功加载。\")\\nelse:\\n    print(f\"文件 {full_path} 不存在。\")\\n\\n# 添加当前任务对应层\\nmodel.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[1], s=s, m=m)\\nmodel.add_incremental_task_layer() '"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''tsk=1\n",
    "# 重新初始化模型并加载预训练参数\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "\n",
    "file_name='cascade_tsk_0_0.734_20241219_122057.pth'\n",
    "full_path=os.path.join(model_save_pth,file_name)\n",
    "# 加载模型\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True))\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")\n",
    "\n",
    "# 添加当前任务对应层\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[1], s=s, m=m)\n",
    "model.add_incremental_task_layer() '''\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 训练 Tsk 0 的 exampler 和 Task 1 的 数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"# 指定要冻结的层前缀\\nfreeze_prefixes = [\\n    'feature_extractor',\\n    'avgpool',\\n    'fc',\\n    'incremental_layers.0',\\n    #'arc_classifiers.0',\\n\\n]\\nfreeze_layers(model, freeze_prefixes)\\n\\n# 使用ConcatDataset合并这两个数据集\\ncombined_train_set = ConcatDataset([exampler_train_sets[0], origin_train_set[1]])\\n# 创建一个新的DataLoader来加载合并后的数据集\\ncombined_train_loader = DataLoader(MyDataset(combined_train_set,transform=transform_train), \\n                                   batch_size=batch_size, shuffle=True, num_workers=4)\\n# 使用ConcatDataset合并这两个数据集\\ncombined_test_set = []\\nfor i in range(tsk+1):\\n    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\\n    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\\n    combined_test_set.append(origin_test_set[i])\\ncombined_test_set=ConcatDataset(combined_test_set)\\n# 创建一个新的DataLoader来加载合并后的数据集\\ncombined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \\n                                  batch_size=batch_size, shuffle=False, num_workers=4)\""
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''# 指定要冻结的层前缀\n",
    "freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'avgpool',\n",
    "    'fc',\n",
    "    'incremental_layers.0',\n",
    "    #'arc_classifiers.0',\n",
    "\n",
    "]\n",
    "freeze_layers(model, freeze_prefixes)\n",
    "\n",
    "# 使用ConcatDataset合并这两个数据集\n",
    "combined_train_set = ConcatDataset([exampler_train_sets[0], origin_train_set[1]])\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_train_loader = DataLoader(MyDataset(combined_train_set,transform=transform_train), \n",
    "                                   batch_size=batch_size, shuffle=True, num_workers=4)\n",
    "# 使用ConcatDataset合并这两个数据集\n",
    "combined_test_set = []\n",
    "for i in range(tsk+1):\n",
    "    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\n",
    "    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\n",
    "    combined_test_set.append(origin_test_set[i])\n",
    "combined_test_set=ConcatDataset(combined_test_set)\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \n",
    "                                  batch_size=batch_size, shuffle=False, num_workers=4)'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'num_epochs=90\\nweight_decay=0.001\\nmax_lr=0.01\\nweight_label=40\\n\\n# 创建调度器和优化器\\noptimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\\nscheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \\n                                                steps_per_epoch=len(combined_train_loader),three_phase=True)\\nclass_stats = get_class_stats(combined_train_loader)\\n# 计算补齐后的 alpha\\nalpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\\nalpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\\nprint(\"alpha_cls:\", alpha_cls)\\nprint(\"alpha_tsk:\", alpha_tsk)\\nalpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \\n                alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\\n                w_cls=1, w_tsk=1)\\nprint(\"alpha_cls:\", alpha_cls)\\nprint(\"alpha_tsk:\", alpha_tsk)\\ncriterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=1.5,reduction=\\'mean\\') \\n#criterion=None\\ncriterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=0,reduction=\\'mean\\') \\n#criterion_label=None'"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''num_epochs=90\n",
    "weight_decay=0.001\n",
    "max_lr=0.01\n",
    "weight_label=40\n",
    "\n",
    "# 创建调度器和优化器\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\n",
    "scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \n",
    "                                                steps_per_epoch=len(combined_train_loader),three_phase=True)\n",
    "class_stats = get_class_stats(combined_train_loader)\n",
    "# 计算补齐后的 alpha\n",
     "alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "alpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \n",
    "                alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\n",
    "                w_cls=1, w_tsk=1)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "criterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=1.5,reduction='mean') \n",
    "#criterion=None\n",
    "criterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=0,reduction='mean') \n",
    "#criterion_label=None'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"# 训练模型\\nmodel.to(device)\\nmodel=train_model(device, model, \\n            task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \\n            criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \\n            num_epochs=num_epochs,use_weighted_loss=False)\\n\""
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''# 训练模型\n",
    "model.to(device)\n",
    "model=train_model(device, model, \n",
    "            task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \n",
    "            criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \n",
    "            num_epochs=num_epochs,use_weighted_loss=False)\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[0]})\\nprint(f'Loss on test set: {loss:.4f}')\\nprint(f'Accuracy on test set: {accuracy:.4f}')\\n\\nloss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[1]})\\nprint(f'Loss on test set: {loss:.4f}')\\nprint(f'Accuracy on test set: {accuracy:.4f}')\""
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[0]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[1]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "注意，后面的m都是0\n",
    "\n",
    "以loss训练\n",
    "\n",
    "gamma 0，0\n",
    "Loss on test set: 1.7423\n",
    "Accuracy on test set: 0.6492\n",
    "Loss on test set: 0.9681\n",
    "Accuracy on test set: 0.7240\n",
    "\n",
    "Loss on test set: 1.6719\n",
    "Accuracy on test set: 0.6560\n",
    "\n",
    "gamma 1.5, 0\n",
    "Loss on test set: 1.6412\n",
    "Accuracy on test set: 0.6472\n",
    "Loss on test set: 1.0033\n",
    "Accuracy on test set: 0.6900\n",
    "\n",
    "Loss on test set: 1.5832\n",
    "Accuracy on test set: 0.6511\n",
    "\n",
    "gamma 1，1\n",
    " {0: 0.8079060684958943, 1: 0.8607562595809913}\n",
    " Loss on test set: 1.7054\n",
    "Accuracy on test set: 0.6378\n",
    "Loss on test set: 0.8537\n",
    "Accuracy on test set: 0.7240\n",
    "\n",
    "Loss on test set: 1.6280\n",
    "Accuracy on test set: 0.6456\n",
    "\n",
    "gamma 2 0\n",
    "Loss on test set: 1.5750\n",
    "Accuracy on test set: 0.6606\n",
    "Loss on test set: 1.1483\n",
    "Accuracy on test set: 0.6480\n",
    "\n",
    "Loss on test set: 1.5362\n",
    "Accuracy on test set: 0.6595\n",
    "\n",
    "w cls 1.2\n",
    "Loss on test set: 1.5992\n",
    "Accuracy on test set: 0.6546\n",
    "Loss on test set: 1.1136\n",
    "Accuracy on test set: 0.6420\n",
    "\n",
    "Loss on test set: 1.5550\n",
    "Accuracy on test set: 0.6535\n",
    "\n",
    "gamma 2 1\n",
    "Loss on test set: 1.6132\n",
    "Accuracy on test set: 0.6446\n",
    "Loss on test set: 0.9171\n",
    "Accuracy on test set: 0.7220\n",
    "\n",
    "Loss on test set: 1.5499\n",
    "Accuracy on test set: 0.6516\n",
    "\n",
    "\n",
    "以acc训练\n",
    "\n",
    "\n",
    "gamma 2，0\n",
    "Loss on test set: 1.6671\n",
    "Accuracy on test set: 0.6392\n",
    "Loss on test set: 0.8624\n",
    "Accuracy on test set: 0.7320\n",
    "\n",
    "Loss on test set: 1.5939\n",
    "Accuracy on test set: 0.6476"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"# 在完整数据集上评估模型\\nloss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader})\\nprint(f'Loss on test set: {loss:.4f}')\\nprint(f'Accuracy on test set: {accuracy:.4f}')\""
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "'''# 在完整数据集上评估模型\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')'''\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'# 保存模型\\n# 格式化准确率，保留三位小数，并将其转换为字符串\\naccuracy_str = f\"{accuracy:.3f}\"\\ncurrent_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\\n# 使用准确率值作为文件名的一部分\\nfile_name = f\"cascade_tsk_{str(tsk)}_NT1_{accuracy_str}_{current_time}.pth\"\\n# 确保保存路径存在，如果不存在则创建\\nif not os.path.exists(model_save_pth):\\n    os.makedirs(model_save_pth, exist_ok=True)\\n# 保存模型到指定路径\\ntorch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''# 保存模型\n",
    "# 格式化准确率，保留三位小数，并将其转换为字符串\n",
    "accuracy_str = f\"{accuracy:.3f}\"\n",
    "current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "# 使用准确率值作为文件名的一部分\n",
    "file_name = f\"cascade_tsk_{str(tsk)}_NT1_{accuracy_str}_{current_time}.pth\"\n",
    "# 确保保存路径存在，如果不存在则创建\n",
    "if not os.path.exists(model_save_pth):\n",
    "    os.makedirs(model_save_pth, exist_ok=True)\n",
    "# 保存模型到指定路径\n",
    "torch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 训练 Tsk 0/1 的 均衡exampler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'tsk=1\\n# 重新初始化模型并加载预训练参数\\nmodel = create_ArcMultiTaskModel(\\'resnet34\\',512,None)\\nmodel.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\\nmodel.add_incremental_task_layer(isInitial=True) \\n# 添加当前任务对应层\\nmodel.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[1], s=s, m=m)\\nmodel.add_incremental_task_layer() \\n\\n#full_path=\\'/home/ubuntu/workspace/incremental_learning/save_models/cascade/cascade_tsk_1_P1_0.701_20241219_032040.pth\\'\\nfull_path=\\'/home/ubuntu/workspace/incremental_learning/save_models/cascade/cascade_tsk_1_P1_0.612_20241219_151325.pth\\'\\n\\n# 加载模型\\nif os.path.isfile(full_path):\\n    model.load_state_dict(torch.load(full_path,weights_only=True))\\n    print(f\"模型已从 {full_path} 成功加载。\")\\nelse:\\n    print(f\"文件 {full_path} 不存在。\")'"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''tsk=1\n",
    "# 重新初始化模型并加载预训练参数\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "# 添加当前任务对应层\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[1], s=s, m=m)\n",
    "model.add_incremental_task_layer() \n",
    "\n",
    "#full_path='/home/ubuntu/workspace/incremental_learning/save_models/cascade/cascade_tsk_1_P1_0.701_20241219_032040.pth'\n",
    "full_path='/home/ubuntu/workspace/incremental_learning/save_models/cascade/cascade_tsk_1_P1_0.612_20241219_151325.pth'\n",
    "\n",
    "# 加载模型\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True))\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'# 使用ConcatDataset合并这两个数据集\\ncombined_test_set = ConcatDataset([exampler_test_sets[0], exampler_test_sets[1]])\\n# 创建一个新的DataLoader来加载合并后的数据集\\ncombined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), batch_size=batch_size, shuffle=False, num_workers=4)'"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'avgpool',\n",
    "    'fc',\n",
    "    'incremental_layers.0',\n",
    "    'incremental_layers.1.layer',\n",
    "    'arc_classifiers',\n",
    "]\n",
    "\n",
    "freeze_layers(model, freeze_prefixes)\n",
    "\n",
    "# 使用ConcatDataset合并这两个数据集\n",
    "combined_train_set = ConcatDataset([exampler_train_sets[0], exampler_train_sets[1]])\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_train_loader = DataLoader(MyDataset(combined_train_set,transform=transform_train), batch_size=batch_size, shuffle=True, num_workers=4)'''\n",
    "\n",
    "'''# 使用ConcatDataset合并这两个数据集\n",
    "combined_test_set = ConcatDataset([exampler_test_sets[0], exampler_test_sets[1]])\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), batch_size=batch_size, shuffle=False, num_workers=4)'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'num_epochs=90\\nweight_decay=0.001\\nmax_lr=0.001\\nweight_label=5\\n# 创建调度器和优化器\\noptimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\\nscheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \\n                                                steps_per_epoch=len(combined_train_loader),three_phase=True)\\nclass_stats = get_class_stats(combined_train_loader)\\n# 计算补齐后的 alpha\\nalpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\\nprint(\"alpha_cls:\", alpha_cls)\\nprint(\"alpha_tsk:\", alpha_tsk)\\nalpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \\n                alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\\n                w_cls=1, w_tsk=1)\\nprint(\"alpha_cls:\", alpha_cls)\\nprint(\"alpha_tsk:\", alpha_tsk)\\ncriterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=2,reduction=\\'mean\\') \\ncriterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=2,reduction=\\'mean\\') '"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''num_epochs=90\n",
    "weight_decay=0.001\n",
    "max_lr=0.001\n",
    "weight_label=5\n",
    "# 创建调度器和优化器\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\n",
    "scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \n",
    "                                                steps_per_epoch=len(combined_train_loader),three_phase=True)\n",
    "class_stats = get_class_stats(combined_train_loader)\n",
    "# 计算补齐后的 alpha\n",
    "alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "alpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \n",
    "                alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\n",
    "                w_cls=1, w_tsk=1)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "criterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=2,reduction='mean') \n",
    "criterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=2,reduction='mean') '''\n",
    "#criterion_label = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"# 训练模型\\nmodel.to(device)\\nmodel=train_model(device, model, \\n            task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \\n            criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \\n            num_epochs=num_epochs,use_weighted_loss=False)\""
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''# 训练模型\n",
    "model.to(device)\n",
    "model=train_model(device, model, \n",
    "            task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \n",
    "            criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \n",
    "            num_epochs=num_epochs,use_weighted_loss=False)'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[0]})\\nprint(f'Loss on test set: {loss:.4f}')\\nprint(f'Accuracy on test set: {accuracy:.4f}')\\n\\nloss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[1]})\\nprint(f'Loss on test set: {loss:.4f}')\\nprint(f'Accuracy on test set: {accuracy:.4f}')\""
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[0]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[1]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'combined_tset_set = []\\nfor i in range(tsk):\\n    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\\n    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\\n    combined_tset_set.append(origin_test_set[i])\\n# 创建一个新的DataLoader来加载合并后的数据集\\ncombined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \\n                                  batch_size=batch_size, shuffle=False, num_workers=4)\\n\\n# 在完整数据集上评估模型\\nloss, accuracy = evaluate_model(device,label_mapping,model, {\\'val\\': combined_test_loader}, criterion)\\nprint(f\\'Loss on test set: {loss:.4f}\\')\\nprint(f\\'Accuracy on test set: {accuracy:.4f}\\')\\n\\n# 保存模型\\n# 格式化准确率，保留三位小数，并将其转换为字符串\\naccuracy_str = f\"{accuracy:.3f}\"\\ncurrent_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\\n# 使用准确率值作为文件名的一部分\\nfile_name = f\"cascade_tsk_{str(tsk)}_P2_{accuracy_str}_{current_time}.pth\"\\n# 确保保存路径存在，如果不存在则创建\\nif not os.path.exists(model_save_pth):\\n    os.makedirs(model_save_pth, exist_ok=True)\\n# 保存模型到指定路径\\ntorch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''combined_tset_set = []\n",
    "for i in range(tsk):\n",
    "    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\n",
    "    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\n",
    "    combined_tset_set.append(origin_test_set[i])\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \n",
    "                                  batch_size=batch_size, shuffle=False, num_workers=4)\n",
    "\n",
    "# 在完整数据集上评估模型\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader}, criterion)\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "# 保存模型\n",
    "# 格式化准确率，保留三位小数，并将其转换为字符串\n",
    "accuracy_str = f\"{accuracy:.3f}\"\n",
    "current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "# 使用准确率值作为文件名的一部分\n",
    "file_name = f\"cascade_tsk_{str(tsk)}_P2_{accuracy_str}_{current_time}.pth\"\n",
    "# 确保保存路径存在，如果不存在则创建\n",
    "if not os.path.exists(model_save_pth):\n",
    "    os.makedirs(model_save_pth, exist_ok=True)\n",
    "# 保存模型到指定路径\n",
    "torch.save(model.state_dict(), os.path.join(model_save_pth, file_name))'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Task 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 1\n",
      "Incremental Task Layer added, current len: 1\n",
      "Incremental ARC Classifier added, current len: 2\n",
      "Incremental Task Layer added, current len: 2\n",
      "模型已从 /home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_1_NT1_0.659_20241222_131707.pth 成功加载。\n",
      "Loss on test set: 1.5750\n",
      "Accuracy on test set: 0.6606\n",
      "Loss on test set: 1.1483\n",
      "Accuracy on test set: 0.6480\n"
     ]
    }
   ],
   "source": [
    "tsk=2\n",
    "# 重新初始化模型并加载预训练参数\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "# 添加当前任务对应层\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[1], s=s, m=0)\n",
    "model.add_incremental_task_layer() \n",
    "\n",
    "file_name='cascade_tsk_1_NT1_0.659_20241222_131707.pth'\n",
    "full_path=os.path.join(model_save_pth,file_name)\n",
    "# 加载模型\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True))\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")\n",
    "\n",
    "\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[0]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[1]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 3\n",
      "Incremental Task Layer added, current len: 3\n"
     ]
    }
   ],
   "source": [
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[2], s=s, m=0)\n",
    "model.add_incremental_task_layer() "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 训练 Tsk 0/1 的 exampler 和 Task 2 的 数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing fc.0.weight\n",
      "----Freezing fc.0.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "incremental_layers.1.gate_generator.0.weight\n",
      "incremental_layers.1.gate_generator.0.bias\n",
      "incremental_layers.1.gate_generator.1.weight\n",
      "incremental_layers.1.gate_generator.1.bias\n",
      "incremental_layers.1.gate_generator.5.weight\n",
      "incremental_layers.1.gate_generator.5.bias\n",
      "incremental_layers.2.layer.0.weight\n",
      "incremental_layers.2.layer.0.bias\n",
      "incremental_layers.2.layer.1.weight\n",
      "incremental_layers.2.layer.1.bias\n",
      "incremental_layers.2.layer.3.weight\n",
      "incremental_layers.2.layer.3.bias\n",
      "incremental_layers.2.layer.4.weight\n",
      "incremental_layers.2.layer.4.bias\n",
      "incremental_layers.2.layer.6.weight\n",
      "incremental_layers.2.layer.6.bias\n",
      "incremental_layers.2.layer.7.weight\n",
      "incremental_layers.2.layer.7.bias\n",
      "incremental_layers.2.gate_generator.0.weight\n",
      "incremental_layers.2.gate_generator.0.bias\n",
      "incremental_layers.2.gate_generator.1.weight\n",
      "incremental_layers.2.gate_generator.1.bias\n",
      "incremental_layers.2.gate_generator.5.weight\n",
      "incremental_layers.2.gate_generator.5.bias\n",
      "arc_classifiers.0.weight\n",
      "arc_classifiers.1.weight\n",
      "arc_classifiers.2.weight\n"
     ]
    }
   ],
   "source": [
    "# 指定要冻结的层前缀\n",
    "freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'avgpool',\n",
    "    'fc',\n",
    "    'incremental_layers.0',\n",
    "    'incremental_layers.1.layer',\n",
    "    #'arc_classifiers.0',\n",
    "    #'arc_classifiers.1',\n",
    "]\n",
    "freeze_layers(model, freeze_prefixes)\n",
    "\n",
    "# 使用ConcatDataset合并这两个数据集\n",
    "combined_train_set = ConcatDataset([exampler_train_sets[0], exampler_train_sets[1],origin_train_set[2]])\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_train_loader = DataLoader(MyDataset(combined_train_set,transform=transform_train), \n",
    "                                   batch_size=batch_size, shuffle=True, num_workers=4)\n",
    "\n",
    "\n",
    "combined_test_set = []\n",
    "for i in range(tsk+1):\n",
    "    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\n",
    "    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\n",
    "    combined_test_set.append(origin_test_set[i])\n",
    "combined_test_set=ConcatDataset(combined_test_set)\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \n",
    "                                  batch_size=batch_size, shuffle=False, num_workers=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 3.59998704 35.99870405  1.43999793]\n",
      "num_prev_task_classes: 55\n",
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 3.59998704 35.99870405  1.43999793]\n"
     ]
    }
   ],
   "source": [
    "num_epochs=90\n",
    "weight_decay=0.001\n",
    "max_lr=0.01\n",
    "weight_label=40\n",
    "\n",
    "# 创建调度器和优化器\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\n",
    "scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \n",
    "                                                steps_per_epoch=len(combined_train_loader),three_phase=True)\n",
    "class_stats = get_class_stats(combined_train_loader)\n",
    "# 计算补齐后的 alpha\n",
    "alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "alpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \n",
    "                alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\n",
    "                w_cls=1, w_tsk=1)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "criterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=1,reduction='mean') \n",
    "criterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=1,reduction='mean') \n",
    "#criterion_label=None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "92acae2844c44b0faef94d8e57c6326e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/5130 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/90\n",
      "---------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/90 - Train Loss: 169.5998\n",
      "Per Task Accuracy: {0: 0.5261115561786501, 1: 0.957843638221768, 2: 0.011486835312338668}\n",
      "Validation Loss: 291.5595 - Validation Accuracy: 0.5407\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 2/90\n",
      "---------------\n",
      "Epoch 2/90 - Train Loss: 122.1547\n",
      "Per Task Accuracy: {0: 0.6929451231724414, 1: 0.864460909555442, 2: 0.033686112545172946}\n",
      "Validation Loss: 259.2014 - Validation Accuracy: 0.5810\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 3/90\n",
      "---------------\n",
      "Epoch 3/90 - Train Loss: 109.7653\n",
      "Per Task Accuracy: {0: 0.7910825155217304, 1: 0.7926673479816045, 2: 0.04762519359834796}\n",
      "Validation Loss: 250.9760 - Validation Accuracy: 0.5930\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 4/90\n",
      "---------------\n",
      "Epoch 4/90 - Train Loss: 108.6117\n",
      "Per Task Accuracy: {0: 0.7832715802122973, 1: 0.8140010219724068, 2: 0.05536912751677853}\n",
      "Validation Loss: 251.7884 - Validation Accuracy: 0.5932\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 5/90\n",
      "---------------\n",
      "Epoch 5/90 - Train Loss: 99.4383\n",
      "Per Task Accuracy: {0: 0.7768375726016423, 1: 0.8460654062340317, 2: 0.05317501290655653}\n",
      "Validation Loss: 261.4108 - Validation Accuracy: 0.5907\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 6/90\n",
      "---------------\n",
      "Epoch 6/90 - Train Loss: 94.3335\n",
      "Per Task Accuracy: {0: 0.8289355097135991, 1: 0.7639243740419008, 2: 0.09292720702116675}\n",
      "Validation Loss: 250.7829 - Validation Accuracy: 0.6050\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 7/90\n",
      "---------------\n",
      "Epoch 7/90 - Train Loss: 92.8042\n",
      "Per Task Accuracy: {0: 0.8471860604846786, 1: 0.7360756259580992, 2: 0.11060918946824987}\n",
      "Validation Loss: 248.5086 - Validation Accuracy: 0.6102\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 8/90\n",
      "---------------\n",
      "Epoch 8/90 - Train Loss: 91.2904\n",
      "Per Task Accuracy: {0: 0.8576006408972562, 1: 0.7259836484414921, 2: 0.1142230252968508}\n",
      "Validation Loss: 245.0103 - Validation Accuracy: 0.6125\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 9/90\n",
      "---------------\n",
      "Epoch 9/90 - Train Loss: 90.0933\n",
      "Per Task Accuracy: {0: 0.8429551371920689, 1: 0.7305825242718447, 2: 0.17359318533815177}\n",
      "Validation Loss: 247.7289 - Validation Accuracy: 0.6050\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 10/90\n",
      "---------------\n",
      "Epoch 10/90 - Train Loss: 87.9332\n",
      "Per Task Accuracy: {0: 0.8569997997196075, 1: 0.74386816555953, 2: 0.17307692307692307}\n",
      "Validation Loss: 246.4109 - Validation Accuracy: 0.6118\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 11/90\n",
      "---------------\n",
      "Epoch 11/90 - Train Loss: 85.4352\n",
      "Per Task Accuracy: {0: 0.880032044862808, 1: 0.6438426162493612, 2: 0.18959731543624161}\n",
      "Validation Loss: 243.7117 - Validation Accuracy: 0.6135\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 12/90\n",
      "---------------\n",
      "Epoch 12/90 - Train Loss: 87.4166\n",
      "Per Task Accuracy: {0: 0.8664129781694372, 1: 0.6856157383750638, 2: 0.20302013422818793}\n",
      "Validation Loss: 247.1883 - Validation Accuracy: 0.6073\n",
      "---------------\n",
      "Epoch 13/90\n",
      "---------------\n",
      "Epoch 13/90 - Train Loss: 85.4536\n",
      "Per Task Accuracy: {0: 0.8503905467654717, 1: 0.6552120592743996, 2: 0.2902684563758389}\n",
      "Validation Loss: 249.8104 - Validation Accuracy: 0.6088\n",
      "---------------\n",
      "Epoch 14/90\n",
      "---------------\n",
      "Epoch 14/90 - Train Loss: 85.7689\n",
      "Per Task Accuracy: {0: 0.8691918686160625, 1: 0.6935360245273378, 2: 0.19372741352607126}\n",
      "Validation Loss: 244.0603 - Validation Accuracy: 0.6140\n",
      "---------------\n",
      "Epoch 15/90\n",
      "---------------\n",
      "Epoch 15/90 - Train Loss: 85.1917\n",
      "Per Task Accuracy: {0: 0.8768275585820148, 1: 0.6517629024016351, 2: 0.2541300980898296}\n",
      "Validation Loss: 245.7287 - Validation Accuracy: 0.6132\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 16/90\n",
      "---------------\n",
      "Epoch 16/90 - Train Loss: 80.1494\n",
      "Per Task Accuracy: {0: 0.8656118565992389, 1: 0.6075625958099131, 2: 0.31350025813113064}\n",
      "Validation Loss: 248.5587 - Validation Accuracy: 0.6128\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 17/90\n",
      "---------------\n",
      "Epoch 17/90 - Train Loss: 81.2008\n",
      "Per Task Accuracy: {0: 0.8419787702783897, 1: 0.67769545222279, 2: 0.32447083118224057}\n",
      "Validation Loss: 250.3058 - Validation Accuracy: 0.6062\n",
      "---------------\n",
      "Epoch 18/90\n",
      "---------------\n",
      "Epoch 18/90 - Train Loss: 79.9678\n",
      "Per Task Accuracy: {0: 0.8970308431804527, 1: 0.6120337250894226, 2: 0.2769747031491998}\n",
      "Validation Loss: 242.6152 - Validation Accuracy: 0.6205\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 19/90\n",
      "---------------\n",
      "Epoch 19/90 - Train Loss: 78.1754\n",
      "Per Task Accuracy: {0: 0.8652112958141398, 1: 0.6831885539090444, 2: 0.3353123386680434}\n",
      "Validation Loss: 251.0912 - Validation Accuracy: 0.6163\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 20/90\n",
      "---------------\n",
      "Epoch 20/90 - Train Loss: 76.6364\n",
      "Per Task Accuracy: {0: 0.8253304626477068, 1: 0.6851047521716913, 2: 0.3642230252968508}\n",
      "Validation Loss: 253.7424 - Validation Accuracy: 0.6122\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 21/90\n",
      "---------------\n",
      "Epoch 21/90 - Train Loss: 79.6864\n",
      "Per Task Accuracy: {0: 0.8329411175645904, 1: 0.7052887072049054, 2: 0.34925141972121837}\n",
      "Validation Loss: 251.0805 - Validation Accuracy: 0.6103\n",
      "---------------\n",
      "Epoch 22/90\n",
      "---------------\n",
      "Epoch 22/90 - Train Loss: 78.0825\n",
      "Per Task Accuracy: {0: 0.8191217704786702, 1: 0.7312212570260603, 2: 0.35970573051109966}\n",
      "Validation Loss: 257.3271 - Validation Accuracy: 0.6090\n",
      "---------------\n",
      "Epoch 23/90\n",
      "---------------\n",
      "Epoch 23/90 - Train Loss: 79.1610\n",
      "Per Task Accuracy: {0: 0.834543360704987, 1: 0.7448901379662749, 2: 0.33479607640681464}\n",
      "Validation Loss: 254.8482 - Validation Accuracy: 0.6163\n",
      "---------------\n",
      "Epoch 24/90\n",
      "---------------\n",
      "Epoch 24/90 - Train Loss: 78.3847\n",
      "Per Task Accuracy: {0: 0.8409272982175046, 1: 0.7391415431783341, 2: 0.3156943727413526}\n",
      "Validation Loss: 252.0085 - Validation Accuracy: 0.6163\n",
      "---------------\n",
      "Epoch 25/90\n",
      "---------------\n",
      "Epoch 25/90 - Train Loss: 78.6955\n",
      "Per Task Accuracy: {0: 0.8578009212898057, 1: 0.5984925907000511, 2: 0.38177594217862676}\n",
      "Validation Loss: 249.8016 - Validation Accuracy: 0.6228\n",
      "---------------\n",
      "Epoch 26/90\n",
      "---------------\n",
      "Epoch 26/90 - Train Loss: 75.5996\n",
      "Per Task Accuracy: {0: 0.8780292409373123, 1: 0.6083290751149719, 2: 0.39429530201342283}\n",
      "Validation Loss: 243.6355 - Validation Accuracy: 0.6247\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 27/90\n",
      "---------------\n",
      "Epoch 27/90 - Train Loss: 76.3976\n",
      "Per Task Accuracy: {0: 0.8597786901662328, 1: 0.6811446090955544, 2: 0.36886938564790916}\n",
      "Validation Loss: 249.0092 - Validation Accuracy: 0.6202\n",
      "---------------\n",
      "Epoch 28/90\n",
      "---------------\n",
      "Epoch 28/90 - Train Loss: 74.0325\n",
      "Per Task Accuracy: {0: 0.860004005607851, 1: 0.6394992335206949, 2: 0.390036138358286}\n",
      "Validation Loss: 250.2840 - Validation Accuracy: 0.6210\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 29/90\n",
      "---------------\n",
      "Epoch 29/90 - Train Loss: 72.6513\n",
      "Per Task Accuracy: {0: 0.8018976567194072, 1: 0.7551098620337251, 2: 0.4019101703665462}\n",
      "Validation Loss: 258.2348 - Validation Accuracy: 0.6102\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 30/90\n",
      "---------------\n",
      "Epoch 30/90 - Train Loss: 73.4901\n",
      "Per Task Accuracy: {0: 0.8425545764069697, 1: 0.7171691364333163, 2: 0.38642230252968507}\n",
      "Validation Loss: 248.4149 - Validation Accuracy: 0.6183\n",
      "---------------\n",
      "Epoch 31/90\n",
      "---------------\n",
      "Epoch 31/90 - Train Loss: 70.2401\n",
      "Per Task Accuracy: {0: 0.8652112958141398, 1: 0.6798671435871232, 2: 0.4143004646360351}\n",
      "Validation Loss: 249.1689 - Validation Accuracy: 0.6202\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 32/90\n",
      "---------------\n",
      "Epoch 32/90 - Train Loss: 76.0327\n",
      "Per Task Accuracy: {0: 0.820123172441418, 1: 0.6613438937148697, 2: 0.40203923593185337}\n",
      "Validation Loss: 251.7917 - Validation Accuracy: 0.6153\n",
      "---------------\n",
      "Epoch 33/90\n",
      "---------------\n",
      "Epoch 33/90 - Train Loss: 67.5748\n",
      "Per Task Accuracy: {0: 0.8099339074704587, 1: 0.7110373019928462, 2: 0.44643779039752196}\n",
      "Validation Loss: 257.9074 - Validation Accuracy: 0.6122\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 34/90\n",
      "---------------\n",
      "Epoch 34/90 - Train Loss: 70.6163\n",
      "Per Task Accuracy: {0: 0.8223262567594632, 1: 0.6674757281553398, 2: 0.49290139390810533}\n",
      "Validation Loss: 252.2903 - Validation Accuracy: 0.6182\n",
      "---------------\n",
      "Epoch 35/90\n",
      "---------------\n",
      "Epoch 35/90 - Train Loss: 67.5825\n",
      "Per Task Accuracy: {0: 0.8129130783096334, 1: 0.6553398058252428, 2: 0.5682756840474962}\n",
      "Validation Loss: 259.7211 - Validation Accuracy: 0.6133\n",
      "---------------\n",
      "Epoch 36/90\n",
      "---------------\n",
      "Epoch 36/90 - Train Loss: 66.2083\n",
      "Per Task Accuracy: {0: 0.8547967154015622, 1: 0.5507153806847215, 2: 0.5911202891068663}\n",
      "Validation Loss: 250.1951 - Validation Accuracy: 0.6173\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 37/90\n",
      "---------------\n",
      "Epoch 37/90 - Train Loss: 67.3430\n",
      "Per Task Accuracy: {0: 0.8541708391748448, 1: 0.6217424629535002, 2: 0.5622096024780588}\n",
      "Validation Loss: 253.9254 - Validation Accuracy: 0.6163\n",
      "---------------\n",
      "Epoch 38/90\n",
      "---------------\n",
      "Epoch 38/90 - Train Loss: 66.8763\n",
      "Per Task Accuracy: {0: 0.8473613058281594, 1: 0.6277465508431272, 2: 0.5538203407330924}\n",
      "Validation Loss: 255.4252 - Validation Accuracy: 0.6153\n",
      "---------------\n",
      "Epoch 39/90\n",
      "---------------\n",
      "Epoch 39/90 - Train Loss: 64.1511\n",
      "Per Task Accuracy: {0: 0.8022982175045064, 1: 0.7017118037812979, 2: 0.5486577181208053}\n",
      "Validation Loss: 264.2092 - Validation Accuracy: 0.6073\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 40/90\n",
      "---------------\n",
      "Epoch 40/90 - Train Loss: 64.4244\n",
      "Per Task Accuracy: {0: 0.7890797115962347, 1: 0.6778231987736331, 2: 0.6375838926174496}\n",
      "Validation Loss: 269.4100 - Validation Accuracy: 0.6088\n",
      "---------------\n",
      "Epoch 41/90\n",
      "---------------\n",
      "Epoch 41/90 - Train Loss: 67.0838\n",
      "Per Task Accuracy: {0: 0.8323402763869417, 1: 0.5961931527848748, 2: 0.6582343830665979}\n",
      "Validation Loss: 260.7538 - Validation Accuracy: 0.6133\n",
      "---------------\n",
      "Epoch 42/90\n",
      "---------------\n",
      "Epoch 42/90 - Train Loss: 64.1940\n",
      "Per Task Accuracy: {0: 0.8217254155818146, 1: 0.6506131834440471, 2: 0.6029943211151265}\n",
      "Validation Loss: 265.4300 - Validation Accuracy: 0.6080\n",
      "---------------\n",
      "Epoch 43/90\n",
      "---------------\n",
      "Epoch 43/90 - Train Loss: 62.3415\n",
      "Per Task Accuracy: {0: 0.8351442018826357, 1: 0.6366888094021461, 2: 0.638616417139907}\n",
      "Validation Loss: 259.2062 - Validation Accuracy: 0.6173\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 44/90\n",
      "---------------\n",
      "Epoch 44/90 - Train Loss: 62.2013\n",
      "Per Task Accuracy: {0: 0.7834718606048467, 1: 0.6725855901890649, 2: 0.6778523489932886}\n",
      "Validation Loss: 273.0351 - Validation Accuracy: 0.6013\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 45/90\n",
      "---------------\n",
      "Epoch 45/90 - Train Loss: 62.5385\n",
      "Per Task Accuracy: {0: 0.8509663528940516, 1: 0.6021972406745018, 2: 0.5699535363964894}\n",
      "Validation Loss: 253.0232 - Validation Accuracy: 0.6252\n",
      "---------------\n",
      "Epoch 46/90\n",
      "---------------\n",
      "Epoch 46/90 - Train Loss: 62.5414\n",
      "Per Task Accuracy: {0: 0.7992940116162628, 1: 0.6681144609095554, 2: 0.6897263810015488}\n",
      "Validation Loss: 263.3710 - Validation Accuracy: 0.6138\n",
      "---------------\n",
      "Epoch 47/90\n",
      "---------------\n",
      "Epoch 47/90 - Train Loss: 61.3068\n",
      "Per Task Accuracy: {0: 0.876401962747847, 1: 0.5251660705160961, 2: 0.6334537945276201}\n",
      "Validation Loss: 249.4905 - Validation Accuracy: 0.6320\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 48/90\n",
      "---------------\n",
      "Epoch 48/90 - Train Loss: 62.1228\n",
      "Per Task Accuracy: {0: 0.8018976567194072, 1: 0.6966019417475728, 2: 0.60557563242127}\n",
      "Validation Loss: 260.8233 - Validation Accuracy: 0.6123\n",
      "---------------\n",
      "Epoch 49/90\n",
      "---------------\n",
      "Epoch 49/90 - Train Loss: 58.4851\n",
      "Per Task Accuracy: {0: 0.8267324253955538, 1: 0.584312723556464, 2: 0.6675271037687145}\n",
      "Validation Loss: 262.6713 - Validation Accuracy: 0.6147\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 50/90\n",
      "---------------\n",
      "Epoch 50/90 - Train Loss: 60.5998\n",
      "Per Task Accuracy: {0: 0.8435559783697176, 1: 0.6428206438426163, 2: 0.6302271553949407}\n",
      "Validation Loss: 254.8715 - Validation Accuracy: 0.6242\n",
      "---------------\n",
      "Epoch 51/90\n",
      "---------------\n",
      "Epoch 51/90 - Train Loss: 57.4289\n",
      "Per Task Accuracy: {0: 0.8241287802924093, 1: 0.633495145631068, 2: 0.6689468249870935}\n",
      "Validation Loss: 260.4055 - Validation Accuracy: 0.6222\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 52/90\n",
      "---------------\n",
      "Epoch 52/90 - Train Loss: 58.4700\n",
      "Per Task Accuracy: {0: 0.8049018626076507, 1: 0.6590444557996934, 2: 0.6329375322663914}\n",
      "Validation Loss: 270.0524 - Validation Accuracy: 0.6072\n",
      "---------------\n",
      "Epoch 53/90\n",
      "---------------\n",
      "Epoch 53/90 - Train Loss: 58.6588\n",
      "Per Task Accuracy: {0: 0.7215852193070298, 1: 0.7275166070516096, 2: 0.7160557563242127}\n",
      "Validation Loss: 276.0446 - Validation Accuracy: 0.5957\n",
      "---------------\n",
      "Epoch 54/90\n",
      "---------------\n",
      "Epoch 54/90 - Train Loss: 59.2785\n",
      "Per Task Accuracy: {0: 0.8091077508511917, 1: 0.5817577925396015, 2: 0.711925658234383}\n",
      "Validation Loss: 262.0287 - Validation Accuracy: 0.6185\n",
      "---------------\n",
      "Epoch 55/90\n",
      "---------------\n",
      "Epoch 55/90 - Train Loss: 57.3030\n",
      "Per Task Accuracy: {0: 0.8171189665531744, 1: 0.5979816044966786, 2: 0.7021166752710377}\n",
      "Validation Loss: 262.1895 - Validation Accuracy: 0.6148\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 56/90\n",
      "---------------\n",
      "Epoch 56/90 - Train Loss: 57.3498\n",
      "Per Task Accuracy: {0: 0.807104946925696, 1: 0.5859734287174246, 2: 0.7506453278265359}\n",
      "Validation Loss: 256.9406 - Validation Accuracy: 0.6195\n",
      "---------------\n",
      "Epoch 57/90\n",
      "---------------\n",
      "Epoch 57/90 - Train Loss: 57.8981\n",
      "Per Task Accuracy: {0: 0.8233276587222111, 1: 0.5844404701073072, 2: 0.7320598864223026}\n",
      "Validation Loss: 259.1585 - Validation Accuracy: 0.6222\n",
      "---------------\n",
      "Epoch 58/90\n",
      "---------------\n",
      "Epoch 58/90 - Train Loss: 60.4590\n",
      "Per Task Accuracy: {0: 0.8117364310034048, 1: 0.6494634644864589, 2: 0.7108931337119256}\n",
      "Validation Loss: 263.9956 - Validation Accuracy: 0.6190\n",
      "---------------\n",
      "Epoch 59/90\n",
      "---------------\n",
      "Epoch 59/90 - Train Loss: 55.2251\n",
      "Per Task Accuracy: {0: 0.7998948527939115, 1: 0.5701328564128768, 2: 0.7558079504388229}\n",
      "Validation Loss: 264.3800 - Validation Accuracy: 0.6158\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 60/90\n",
      "---------------\n",
      "Epoch 60/90 - Train Loss: 56.0243\n",
      "Per Task Accuracy: {0: 0.8357450430602844, 1: 0.5656617271333674, 2: 0.7174754775425917}\n",
      "Validation Loss: 259.9897 - Validation Accuracy: 0.6247\n",
      "---------------\n",
      "Epoch 61/90\n",
      "---------------\n",
      "Epoch 61/90 - Train Loss: 59.4357\n",
      "Per Task Accuracy: {0: 0.8922491488083316, 1: 0.4124936126724578, 2: 0.6251935983479607}\n",
      "Validation Loss: 244.8547 - Validation Accuracy: 0.6380\n",
      "---------------\n",
      "Epoch 62/90\n",
      "---------------\n",
      "Epoch 62/90 - Train Loss: 60.0412\n",
      "Per Task Accuracy: {0: 0.831739435209293, 1: 0.5595298926928973, 2: 0.7201858544140424}\n",
      "Validation Loss: 253.2708 - Validation Accuracy: 0.6247\n",
      "---------------\n",
      "Epoch 63/90\n",
      "---------------\n",
      "Epoch 63/90 - Train Loss: 57.6448\n",
      "Per Task Accuracy: {0: 0.7972912076907671, 1: 0.6539345937659683, 2: 0.696437790397522}\n",
      "Validation Loss: 267.6461 - Validation Accuracy: 0.6098\n",
      "---------------\n",
      "Epoch 64/90\n",
      "---------------\n",
      "Epoch 64/90 - Train Loss: 53.3298\n",
      "Per Task Accuracy: {0: 0.7722561586220709, 1: 0.6460143076136944, 2: 0.7568404749612804}\n",
      "Validation Loss: 272.0560 - Validation Accuracy: 0.6072\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 65/90\n",
      "---------------\n",
      "Epoch 65/90 - Train Loss: 54.3695\n",
      "Per Task Accuracy: {0: 0.7958892449429201, 1: 0.5684721512519162, 2: 0.7537429013939081}\n",
      "Validation Loss: 268.9832 - Validation Accuracy: 0.6100\n",
      "---------------\n",
      "Epoch 66/90\n",
      "---------------\n",
      "Epoch 66/90 - Train Loss: 54.1028\n",
      "Per Task Accuracy: {0: 0.842779891848588, 1: 0.5654062340316811, 2: 0.6861125451729478}\n",
      "Validation Loss: 259.4337 - Validation Accuracy: 0.6273\n",
      "---------------\n",
      "Epoch 67/90\n",
      "---------------\n",
      "Epoch 67/90 - Train Loss: 54.6102\n",
      "Per Task Accuracy: {0: 0.7672491488083316, 1: 0.652529381706694, 2: 0.7666494579246257}\n",
      "Validation Loss: 277.6309 - Validation Accuracy: 0.6047\n",
      "---------------\n",
      "Epoch 68/90\n",
      "---------------\n",
      "Epoch 68/90 - Train Loss: 55.2184\n",
      "Per Task Accuracy: {0: 0.8000951331864611, 1: 0.60539090444558, 2: 0.7289623128549303}\n",
      "Validation Loss: 269.3891 - Validation Accuracy: 0.6138\n",
      "---------------\n",
      "Epoch 69/90\n",
      "---------------\n",
      "Epoch 69/90 - Train Loss: 53.6457\n",
      "Per Task Accuracy: {0: 0.7990686961746445, 1: 0.6267245784363822, 2: 0.6639132679401136}\n",
      "Validation Loss: 265.1999 - Validation Accuracy: 0.6150\n",
      "---------------\n",
      "Epoch 70/90\n",
      "---------------\n",
      "Epoch 70/90 - Train Loss: 55.1048\n",
      "Per Task Accuracy: {0: 0.7984928900460645, 1: 0.5737097598364844, 2: 0.7583892617449665}\n",
      "Validation Loss: 267.6981 - Validation Accuracy: 0.6093\n",
      "---------------\n",
      "Epoch 71/90\n",
      "---------------\n",
      "Epoch 71/90 - Train Loss: 55.1749\n",
      "Per Task Accuracy: {0: 0.758812337272181, 1: 0.6775677056719469, 2: 0.7237996902426432}\n",
      "Validation Loss: 278.7357 - Validation Accuracy: 0.5988\n",
      "---------------\n",
      "Epoch 72/90\n",
      "---------------\n",
      "Epoch 72/90 - Train Loss: 55.3482\n",
      "Per Task Accuracy: {0: 0.7556078509913879, 1: 0.5995145631067961, 2: 0.8094992256066081}\n",
      "Validation Loss: 289.8169 - Validation Accuracy: 0.5917\n",
      "---------------\n",
      "Epoch 73/90\n",
      "---------------\n",
      "Epoch 73/90 - Train Loss: 64.6947\n",
      "Per Task Accuracy: {0: 0.7692519527338274, 1: 0.5878896269800715, 2: 0.6916623644811565}\n",
      "Validation Loss: 258.5186 - Validation Accuracy: 0.6178\n",
      "---------------\n",
      "Epoch 74/90\n",
      "---------------\n",
      "Epoch 74/90 - Train Loss: 57.0537\n",
      "Per Task Accuracy: {0: 0.7660474664530342, 1: 0.621486969851814, 2: 0.752194114610222}\n",
      "Validation Loss: 260.2722 - Validation Accuracy: 0.6203\n",
      "---------------\n",
      "Epoch 75/90\n",
      "---------------\n",
      "Epoch 75/90 - Train Loss: 55.2050\n",
      "Per Task Accuracy: {0: 0.8349689565391548, 1: 0.4860756259580991, 2: 0.7217346411977285}\n",
      "Validation Loss: 255.5270 - Validation Accuracy: 0.6280\n",
      "---------------\n",
      "Epoch 76/90\n",
      "---------------\n",
      "Epoch 76/90 - Train Loss: 56.0224\n",
      "Per Task Accuracy: {0: 0.815341478069297, 1: 0.5820132856412877, 2: 0.7299948373773877}\n",
      "Validation Loss: 260.8342 - Validation Accuracy: 0.6233\n",
      "---------------\n",
      "Epoch 77/90\n",
      "---------------\n",
      "Epoch 77/90 - Train Loss: 54.6003\n",
      "Per Task Accuracy: {0: 0.7482225115161226, 1: 0.705799693408278, 2: 0.7501290655653072}\n",
      "Validation Loss: 264.2173 - Validation Accuracy: 0.6130\n",
      "---------------\n",
      "Epoch 78/90\n",
      "---------------\n",
      "Epoch 78/90 - Train Loss: 60.7451\n",
      "Per Task Accuracy: {0: 0.7285950330462647, 1: 0.7181911088400613, 2: 0.699406298399587}\n",
      "Validation Loss: 257.0376 - Validation Accuracy: 0.6160\n",
      "---------------\n",
      "Epoch 79/90\n",
      "---------------\n",
      "Epoch 79/90 - Train Loss: 57.7729\n",
      "Per Task Accuracy: {0: 0.722561586220709, 1: 0.6641543178334185, 2: 0.7692307692307693}\n",
      "Validation Loss: 267.2990 - Validation Accuracy: 0.6130\n",
      "---------------\n",
      "Epoch 80/90\n",
      "---------------\n",
      "Epoch 80/90 - Train Loss: 56.3755\n",
      "Per Task Accuracy: {0: 0.8097085920288404, 1: 0.5419008686765457, 2: 0.7248322147651006}\n",
      "Validation Loss: 260.0943 - Validation Accuracy: 0.6208\n",
      "---------------\n",
      "Epoch 81/90\n",
      "---------------\n",
      "Epoch 81/90 - Train Loss: 55.7986\n",
      "Per Task Accuracy: {0: 0.782470458642099, 1: 0.60947879407256, 2: 0.7516778523489933}\n",
      "Validation Loss: 261.2495 - Validation Accuracy: 0.6155\n",
      "---------------\n",
      "Epoch 82/90\n",
      "---------------\n",
      "Epoch 82/90 - Train Loss: 53.6292\n",
      "Per Task Accuracy: {0: 0.778264570398558, 1: 0.6469085334695963, 2: 0.7558079504388229}\n",
      "Validation Loss: 265.0645 - Validation Accuracy: 0.6203\n",
      "---------------\n",
      "Epoch 83/90\n",
      "---------------\n",
      "Epoch 83/90 - Train Loss: 55.9939\n",
      "Per Task Accuracy: {0: 0.8008962547566594, 1: 0.5921052631578947, 2: 0.7376097057305111}\n",
      "Validation Loss: 261.6020 - Validation Accuracy: 0.6193\n",
      "---------------\n",
      "Epoch 84/90\n",
      "---------------\n",
      "Epoch 84/90 - Train Loss: 53.1014\n",
      "Per Task Accuracy: {0: 0.8037001802523533, 1: 0.5679611650485437, 2: 0.7609705730511099}\n",
      "Validation Loss: 263.0655 - Validation Accuracy: 0.6178\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 85/90\n",
      "---------------\n",
      "Epoch 85/90 - Train Loss: 52.7290\n",
      "Per Task Accuracy: {0: 0.8095083116362908, 1: 0.5965763924374042, 2: 0.7237996902426432}\n",
      "Validation Loss: 267.3003 - Validation Accuracy: 0.6150\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 86/90\n",
      "---------------\n",
      "Epoch 86/90 - Train Loss: 53.7715\n",
      "Per Task Accuracy: {0: 0.7988934508311636, 1: 0.5955544200306592, 2: 0.7862674238513164}\n",
      "Validation Loss: 273.9612 - Validation Accuracy: 0.6123\n",
      "---------------\n",
      "Epoch 87/90\n",
      "---------------\n",
      "Epoch 87/90 - Train Loss: 52.4044\n",
      "Per Task Accuracy: {0: 0.8004956939715602, 1: 0.5940214614205417, 2: 0.7945276200309758}\n",
      "Validation Loss: 276.0315 - Validation Accuracy: 0.6125\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 88/90\n",
      "---------------\n",
      "Epoch 88/90 - Train Loss: 53.0948\n",
      "Per Task Accuracy: {0: 0.8030993390747045, 1: 0.5507153806847215, 2: 0.6761744966442953}\n",
      "Validation Loss: 266.2256 - Validation Accuracy: 0.6187\n",
      "---------------\n",
      "Epoch 89/90\n",
      "---------------\n",
      "Epoch 89/90 - Train Loss: 57.1083\n",
      "Per Task Accuracy: {0: 0.788478870418586, 1: 0.5914665304036791, 2: 0.7035363964894166}\n",
      "Validation Loss: 264.9353 - Validation Accuracy: 0.6222\n",
      "---------------\n",
      "Epoch 90/90\n",
      "---------------\n",
      "Epoch 90/90 - Train Loss: 56.0198\n",
      "Per Task Accuracy: {0: 0.7980923292609654, 1: 0.6106285130301482, 2: 0.7117965926690759}\n",
      "Validation Loss: 262.5437 - Validation Accuracy: 0.6222\n"
     ]
    }
   ],
   "source": [
    "# 训练\n",
    "model.to(device)\n",
    "model=train_model(device, model, \n",
    "            task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \n",
    "            criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \n",
    "            num_epochs=num_epochs,use_weighted_loss=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loss on test set: 1.7463\n",
      "Accuracy on test set: 0.6228\n",
      "Loss on test set: 1.5220\n",
      "Accuracy on test set: 0.5660\n",
      "Loss on test set: 1.1264\n",
      "Accuracy on test set: 0.6720\n"
     ]
    }
   ],
   "source": [
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[0]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[1]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[2]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loss on test set: 263.2331\n",
      "Accuracy on test set: 0.6222\n"
     ]
    }
   ],
   "source": [
    "# 在完整数据集上评估模型\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader}, criterion)\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "# 保存模型\n",
    "# 格式化准确率，保留三位小数，并将其转换为字符串\n",
    "accuracy_str = f\"{accuracy:.3f}\"\n",
    "current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "# 使用准确率值作为文件名的一部分\n",
    "file_name = f\"cascade_tsk_{str(tsk)}_P1_{accuracy_str}_{current_time}.pth\"\n",
    "# 确保保存路径存在，如果不存在则创建\n",
    "if not os.path.exists(model_save_pth):\n",
    "    os.makedirs(model_save_pth, exist_ok=True)\n",
    "# 保存模型到指定路径\n",
    "torch.save(model.state_dict(), os.path.join(model_save_pth, file_name))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Loss on test set: 1.7694\n",
    "Accuracy on test set: 0.6104\n",
    "Loss on test set: 1.5682\n",
    "Accuracy on test set: 0.5540\n",
    "Loss on test set: 0.9174\n",
    "Accuracy on test set: 0.7260\n",
    "\n",
    "Loss on test set: 258.1004\n",
    "Accuracy on test set: 0.6153\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "Loss on test set: 1.7602\n",
    "Accuracy on test set: 0.6118\n",
    "Loss on test set: 1.3124\n",
    "Accuracy on test set: 0.5940\n",
    "Loss on test set: 1.1758\n",
    "Accuracy on test set: 0.5920\n",
    "\n",
    "Loss on test set: 260.6121\n",
    "Accuracy on test set: 0.6087\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "criterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=1,reduction='mean') \n",
    "criterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=1,reduction='mean') \n",
    "\n",
    "Loss on test set: 1.7463\n",
    "Accuracy on test set: 0.6228\n",
    "Loss on test set: 1.5220\n",
    "Accuracy on test set: 0.5660\n",
    "Loss on test set: 1.1264\n",
    "Accuracy on test set: 0.6720\n",
    "\n",
    "Loss on test set: 263.2331\n",
    "Accuracy on test set: 0.6222"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 1\n",
      "Incremental Task Layer added, current len: 1\n",
      "Incremental ARC Classifier added, current len: 2\n",
      "Incremental Task Layer added, current len: 2\n",
      "Incremental ARC Classifier added, current len: 3\n",
      "Incremental Task Layer added, current len: 3\n",
      "模型已从 /home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_2_P1_0.622_20241222_152405.pth 成功加载。\n",
      "Incremental ARC Classifier added, current len: 4\n",
      "Incremental Task Layer added, current len: 4\n"
     ]
    }
   ],
   "source": [
    "tsk=3\n",
    "# 重新初始化模型并加载预训练参数\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "# 添加当前任务对应层\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[1], s=s, m=0)\n",
    "model.add_incremental_task_layer() \n",
    "\n",
    "# 添加当前任务对应层\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[2], s=s, m=0)\n",
    "model.add_incremental_task_layer() \n",
    "\n",
    "file_name='cascade_tsk_2_P1_0.622_20241222_152405.pth'\n",
    "full_path=os.path.join(model_save_pth,file_name)\n",
    "# 加载模型\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True))\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")\n",
    "\n",
    "# 添加当前任务对应层\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[3], s=s, m=0)\n",
    "model.add_incremental_task_layer() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing fc.0.weight\n",
      "----Freezing fc.0.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "incremental_layers.1.gate_generator.0.weight\n",
      "incremental_layers.1.gate_generator.0.bias\n",
      "incremental_layers.1.gate_generator.1.weight\n",
      "incremental_layers.1.gate_generator.1.bias\n",
      "incremental_layers.1.gate_generator.5.weight\n",
      "incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "incremental_layers.2.gate_generator.0.weight\n",
      "incremental_layers.2.gate_generator.0.bias\n",
      "incremental_layers.2.gate_generator.1.weight\n",
      "incremental_layers.2.gate_generator.1.bias\n",
      "incremental_layers.2.gate_generator.5.weight\n",
      "incremental_layers.2.gate_generator.5.bias\n",
      "incremental_layers.3.layer.0.weight\n",
      "incremental_layers.3.layer.0.bias\n",
      "incremental_layers.3.layer.1.weight\n",
      "incremental_layers.3.layer.1.bias\n",
      "incremental_layers.3.layer.3.weight\n",
      "incremental_layers.3.layer.3.bias\n",
      "incremental_layers.3.layer.4.weight\n",
      "incremental_layers.3.layer.4.bias\n",
      "incremental_layers.3.layer.6.weight\n",
      "incremental_layers.3.layer.6.bias\n",
      "incremental_layers.3.layer.7.weight\n",
      "incremental_layers.3.layer.7.bias\n",
      "incremental_layers.3.gate_generator.0.weight\n",
      "incremental_layers.3.gate_generator.0.bias\n",
      "incremental_layers.3.gate_generator.1.weight\n",
      "incremental_layers.3.gate_generator.1.bias\n",
      "incremental_layers.3.gate_generator.5.weight\n",
      "incremental_layers.3.gate_generator.5.bias\n",
      "----Freezing arc_classifiers.0.weight\n",
      "----Freezing arc_classifiers.1.weight\n",
      "----Freezing arc_classifiers.2.weight\n",
      "arc_classifiers.3.weight\n"
     ]
    }
   ],
   "source": [
    "# 指定要冻结的层前缀\n",
    "freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'avgpool',\n",
    "    'fc',\n",
    "    'incremental_layers.0',\n",
    "    'incremental_layers.1.layer',\n",
    "    'incremental_layers.2.layer',\n",
    "    'arc_classifiers.0',\n",
    "    'arc_classifiers.1',\n",
    "    'arc_classifiers.2',\n",
    "\n",
    "]\n",
    "freeze_layers(model, freeze_prefixes)\n",
    "\n",
    "# 使用ConcatDataset合并这两个数据集\n",
    "combined_train_set = ConcatDataset([exampler_train_sets[0], exampler_train_sets[1], exampler_train_sets[2],origin_train_set[3]])\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_train_loader = DataLoader(MyDataset(combined_train_set,transform=transform_train), \n",
    "                                   batch_size=batch_size, shuffle=True, num_workers=4)\n",
    "\n",
    "\n",
    "combined_test_set = []\n",
    "for i in range(tsk+1):\n",
    "    # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\n",
    "    # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\n",
    "    combined_test_set.append(origin_test_set[i])\n",
    "combined_test_set=ConcatDataset(combined_test_set)\n",
    "# 创建一个新的DataLoader来加载合并后的数据集\n",
    "combined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \n",
    "                                  batch_size=batch_size, shuffle=False, num_workers=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "alpha_cls: [184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      "   7.39994524   7.39994524   7.39994524   7.39994524   7.39994524]\n",
      "alpha_tsk: [ 3.69998631 36.99863105 36.99863105  1.47999781]\n",
      "num_prev_task_classes: 60\n",
      "alpha_cls: [184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      "   7.39994524   7.39994524   7.39994524   7.39994524   7.39994524]\n",
      "alpha_tsk: [ 3.69998631 36.99863105 36.99863105  1.47999781]\n"
     ]
    }
   ],
   "source": [
    "num_epochs=90\n",
    "weight_decay=0.001\n",
    "max_lr=0.01\n",
    "weight_label=1\n",
    "\n",
    "# 创建调度器和优化器\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\n",
    "scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \n",
    "                                                steps_per_epoch=len(combined_train_loader),three_phase=True)\n",
    "class_stats = get_class_stats(combined_train_loader)\n",
    "# 计算补齐后的 alpha\n",
    "alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "alpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \n",
    "                alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\n",
    "                w_cls=1, w_tsk=1)\n",
    "print(\"alpha_cls:\", alpha_cls)\n",
    "print(\"alpha_tsk:\", alpha_tsk)\n",
    "alpha_tsk=alpha_cls.mean()/alpha_tsk.mean()*alpha_tsk\n",
    "criterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=1,reduction='mean') \n",
    "criterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=1,reduction='mean') \n",
    "#criterion_label=None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "772450f46aeb40528ef48581119298a8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/5220 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/90\n",
      "---------------\n",
      "Epoch 1/90 - Train Loss: 113.1106\n",
      "Per Task Accuracy: {0: 0.8267574604446225, 1: 0.556208482370976, 2: 0.3404749612803304, 3: 0.05836421386977236}\n",
      "Validation Loss: 299.5476 - Validation Accuracy: 0.5585\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 2/90\n",
      "---------------\n",
      "Epoch 2/90 - Train Loss: 66.7148\n",
      "Per Task Accuracy: {0: 0.8135139194872822, 1: 0.533214103219213, 2: 0.6241610738255033, 3: 0.0}\n",
      "Validation Loss: 302.6843 - Validation Accuracy: 0.5597\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 3/90\n",
      "---------------\n",
      "Epoch 3/90 - Train Loss: 63.0663\n",
      "Per Task Accuracy: {0: 0.7416132585619868, 1: 0.6484414920797138, 2: 0.701600413009809, 3: 0.0021175224986765486}\n",
      "Validation Loss: 323.1308 - Validation Accuracy: 0.5417\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 4/90\n",
      "---------------\n",
      "Epoch 4/90 - Train Loss: 59.4172\n",
      "Per Task Accuracy: {0: 0.7802673743240537, 1: 0.6257026060296372, 2: 0.6757872999483737, 3: 0.005955532027527793}\n",
      "Validation Loss: 315.3245 - Validation Accuracy: 0.5580\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 5/90\n",
      "---------------\n",
      "Epoch 5/90 - Train Loss: 58.1159\n",
      "Per Task Accuracy: {0: 0.7552072902062889, 1: 0.7010730710270823, 2: 0.5966701084150748, 3: 0.0026469031233456856}\n",
      "Validation Loss: 319.9852 - Validation Accuracy: 0.5571\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 6/90\n",
      "---------------\n",
      "Epoch 6/90 - Train Loss: 56.3430\n",
      "Per Task Accuracy: {0: 0.7594382134988985, 1: 0.6457588145120082, 2: 0.6359060402684564, 3: 0.0}\n",
      "Validation Loss: 312.3920 - Validation Accuracy: 0.5642\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 7/90\n",
      "---------------\n",
      "Epoch 7/90 - Train Loss: 55.0902\n",
      "Per Task Accuracy: {0: 0.8313639094732626, 1: 0.587378640776699, 2: 0.5511099638616417, 3: 0.0021175224986765486}\n",
      "Validation Loss: 300.3286 - Validation Accuracy: 0.5789\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 8/90\n",
      "---------------\n",
      "Epoch 8/90 - Train Loss: 54.4691\n",
      "Per Task Accuracy: {0: 0.7965151211696375, 1: 0.5357690342360756, 2: 0.6828859060402684, 3: 0.015352038115404976}\n",
      "Validation Loss: 309.9971 - Validation Accuracy: 0.5734\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 9/90\n",
      "---------------\n",
      "Epoch 9/90 - Train Loss: 54.8794\n",
      "Per Task Accuracy: {0: 0.7867013819347086, 1: 0.6046244251405212, 2: 0.5961538461538461, 3: 0.0}\n",
      "Validation Loss: 306.4990 - Validation Accuracy: 0.5718\n",
      "---------------\n",
      "Epoch 10/90\n",
      "---------------\n",
      "Epoch 10/90 - Train Loss: 52.3219\n",
      "Per Task Accuracy: {0: 0.8704185860204287, 1: 0.3921819110884006, 2: 0.6028652555498193, 3: 0.0}\n",
      "Validation Loss: 297.5316 - Validation Accuracy: 0.5851\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 11/90\n",
      "---------------\n",
      "Epoch 11/90 - Train Loss: 50.4194\n",
      "Per Task Accuracy: {0: 0.8531944722611656, 1: 0.5038323965252938, 2: 0.5987351574599896, 3: 0.003838009528851244}\n",
      "Validation Loss: 296.2956 - Validation Accuracy: 0.5874\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 12/90\n",
      "---------------\n",
      "Epoch 12/90 - Train Loss: 53.4851\n",
      "Per Task Accuracy: {0: 0.854396154616463, 1: 0.4721512519161983, 2: 0.6312596799173981, 3: 0.006352567496029645}\n",
      "Validation Loss: 304.0004 - Validation Accuracy: 0.5822\n",
      "---------------\n",
      "Epoch 13/90\n",
      "---------------\n",
      "Epoch 13/90 - Train Loss: 52.1573\n",
      "Per Task Accuracy: {0: 0.8193470859202884, 1: 0.636561062851303, 2: 0.5476251935983479, 3: 0.07451032292218104}\n",
      "Validation Loss: 304.1262 - Validation Accuracy: 0.5786\n",
      "---------------\n",
      "Epoch 14/90\n",
      "---------------\n",
      "Epoch 14/90 - Train Loss: 53.3690\n",
      "Per Task Accuracy: {0: 0.8131383937512517, 1: 0.6282575370464998, 2: 0.6054465668559629, 3: 0.012308099523557438}\n",
      "Validation Loss: 307.9007 - Validation Accuracy: 0.5763\n",
      "---------------\n",
      "Epoch 15/90\n",
      "---------------\n",
      "Epoch 15/90 - Train Loss: 50.5534\n",
      "Per Task Accuracy: {0: 0.819547366312838, 1: 0.547138477261114, 2: 0.6524264326277749, 3: 0.014425622022233986}\n",
      "Validation Loss: 309.5245 - Validation Accuracy: 0.5758\n",
      "---------------\n",
      "Epoch 16/90\n",
      "---------------\n",
      "Epoch 16/90 - Train Loss: 49.1267\n",
      "Per Task Accuracy: {0: 0.8257310234328059, 1: 0.5835462442514052, 2: 0.624677336086732, 3: 0.018660667019587084}\n",
      "Validation Loss: 306.6658 - Validation Accuracy: 0.5805\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 17/90\n",
      "---------------\n",
      "Epoch 17/90 - Train Loss: 52.3289\n",
      "Per Task Accuracy: {0: 0.8668135389545364, 1: 0.4780275932549821, 2: 0.5228446050593701, 3: 0.032556908417151934}\n",
      "Validation Loss: 296.6088 - Validation Accuracy: 0.5854\n",
      "---------------\n",
      "Epoch 18/90\n",
      "---------------\n",
      "Epoch 18/90 - Train Loss: 50.8563\n",
      "Per Task Accuracy: {0: 0.7973162427398358, 1: 0.6392437404190087, 2: 0.591636551368095, 3: 0.03983589200635257}\n",
      "Validation Loss: 305.9757 - Validation Accuracy: 0.5769\n",
      "---------------\n",
      "Epoch 19/90\n",
      "---------------\n",
      "Epoch 19/90 - Train Loss: 50.4482\n",
      "Per Task Accuracy: {0: 0.8449829761666333, 1: 0.5277210015329586, 2: 0.6193856479091379, 3: 0.04340921122286924}\n",
      "Validation Loss: 296.6150 - Validation Accuracy: 0.5855\n",
      "---------------\n",
      "Epoch 20/90\n",
      "---------------\n",
      "Epoch 20/90 - Train Loss: 48.3429\n",
      "Per Task Accuracy: {0: 0.8491888644101743, 1: 0.5693663771078181, 2: 0.5627258647392875, 3: 0.04036527263102171}\n",
      "Validation Loss: 294.5559 - Validation Accuracy: 0.5872\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 21/90\n",
      "---------------\n",
      "Epoch 21/90 - Train Loss: 49.2533\n",
      "Per Task Accuracy: {0: 0.8944522331263769, 1: 0.40035769034236074, 2: 0.554207537429014, 3: 0.01985177342509264}\n",
      "Validation Loss: 289.3213 - Validation Accuracy: 0.5954\n",
      "---------------\n",
      "Epoch 22/90\n",
      "---------------\n",
      "Epoch 22/90 - Train Loss: 49.5021\n",
      "Per Task Accuracy: {0: 0.7608401762467455, 1: 0.6481859989780276, 2: 0.7408363448631905, 3: 0.028321863419798834}\n",
      "Validation Loss: 326.2101 - Validation Accuracy: 0.5623\n",
      "---------------\n",
      "Epoch 23/90\n",
      "---------------\n",
      "Epoch 23/90 - Train Loss: 48.7818\n",
      "Per Task Accuracy: {0: 0.8279591427999199, 1: 0.6011752682677568, 2: 0.5641455859576665, 3: 0.03560084700899947}\n",
      "Validation Loss: 296.6767 - Validation Accuracy: 0.5851\n",
      "---------------\n",
      "Epoch 24/90\n",
      "---------------\n",
      "Epoch 24/90 - Train Loss: 51.1033\n",
      "Per Task Accuracy: {0: 0.8692169036651312, 1: 0.4219468574348493, 2: 0.4834796076406815, 3: 0.07967178401270514}\n",
      "Validation Loss: 293.4382 - Validation Accuracy: 0.5906\n",
      "---------------\n",
      "Epoch 25/90\n",
      "---------------\n",
      "Epoch 25/90 - Train Loss: 49.2592\n",
      "Per Task Accuracy: {0: 0.8199228920488684, 1: 0.5298926928972918, 2: 0.6184821889519876, 3: 0.07331921651667549}\n",
      "Validation Loss: 299.6998 - Validation Accuracy: 0.5808\n",
      "---------------\n",
      "Epoch 26/90\n",
      "---------------\n",
      "Epoch 26/90 - Train Loss: 48.9197\n",
      "Per Task Accuracy: {0: 0.7478469857800921, 1: 0.6575114971895759, 2: 0.7408363448631905, 3: 0.05042350449973531}\n",
      "Validation Loss: 325.3810 - Validation Accuracy: 0.5611\n",
      "---------------\n",
      "Epoch 27/90\n",
      "---------------\n",
      "Epoch 27/90 - Train Loss: 46.6252\n",
      "Per Task Accuracy: {0: 0.8289605447626678, 1: 0.5518650996423097, 2: 0.57769747031492, 3: 0.12321334039174166}\n",
      "Validation Loss: 296.3378 - Validation Accuracy: 0.5865\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 28/90\n",
      "---------------\n",
      "Epoch 28/90 - Train Loss: 47.4418\n",
      "Per Task Accuracy: {0: 0.8616062487482475, 1: 0.4605263157894737, 2: 0.5765358802271554, 3: 0.008470089994706194}\n",
      "Validation Loss: 295.3985 - Validation Accuracy: 0.5882\n",
      "---------------\n",
      "Epoch 29/90\n",
      "---------------\n",
      "Epoch 29/90 - Train Loss: 49.0184\n",
      "Per Task Accuracy: {0: 0.7356048467854996, 1: 0.64256515074093, 2: 0.748580278781621, 3: 0.043541556379036526}\n",
      "Validation Loss: 326.6965 - Validation Accuracy: 0.5594\n",
      "---------------\n",
      "Epoch 30/90\n",
      "---------------\n",
      "Epoch 30/90 - Train Loss: 51.6470\n",
      "Per Task Accuracy: {0: 0.8419787702783897, 1: 0.6055186509964231, 2: 0.3253742901393908, 3: 0.08205399682371625}\n",
      "Validation Loss: 295.6436 - Validation Accuracy: 0.5814\n",
      "---------------\n",
      "Epoch 31/90\n",
      "---------------\n",
      "Epoch 31/90 - Train Loss: 54.0640\n",
      "Per Task Accuracy: {0: 0.8972561586220709, 1: 0.4512008175779254, 2: 0.4024264326277749, 3: 0.04340921122286924}\n",
      "Validation Loss: 289.8893 - Validation Accuracy: 0.5894\n",
      "---------------\n",
      "Epoch 32/90\n",
      "---------------\n",
      "Epoch 32/90 - Train Loss: 49.5314\n",
      "Per Task Accuracy: {0: 0.871620268375726, 1: 0.4182422074603986, 2: 0.5006453278265359, 3: 0.1422710428798306}\n",
      "Validation Loss: 292.8235 - Validation Accuracy: 0.5918\n",
      "---------------\n",
      "Epoch 33/90\n",
      "---------------\n",
      "Epoch 33/90 - Train Loss: 48.8006\n",
      "Per Task Accuracy: {0: 0.7968906469056679, 1: 0.6182933060807358, 2: 0.6024780588538978, 3: 0.1859449444150344}\n",
      "Validation Loss: 302.1470 - Validation Accuracy: 0.5749\n",
      "---------------\n",
      "Epoch 34/90\n",
      "---------------\n",
      "Epoch 34/90 - Train Loss: 47.5597\n",
      "Per Task Accuracy: {0: 0.7746595233326657, 1: 0.611394992335207, 2: 0.6515229736706247, 3: 0.13088935944944416}\n",
      "Validation Loss: 315.4133 - Validation Accuracy: 0.5672\n",
      "---------------\n",
      "Epoch 35/90\n",
      "---------------\n",
      "Epoch 35/90 - Train Loss: 50.1253\n",
      "Per Task Accuracy: {0: 0.8950280392549569, 1: 0.3734031681144609, 2: 0.4412751677852349, 3: 0.11739015352038115}\n",
      "Validation Loss: 285.8094 - Validation Accuracy: 0.5969\n",
      "---------------\n",
      "Epoch 36/90\n",
      "---------------\n",
      "Epoch 36/90 - Train Loss: 51.5899\n",
      "Per Task Accuracy: {0: 0.8191217704786702, 1: 0.5654062340316811, 2: 0.5240061951471348, 3: 0.18170989941768131}\n",
      "Validation Loss: 300.2938 - Validation Accuracy: 0.5786\n",
      "---------------\n",
      "Epoch 37/90\n",
      "---------------\n",
      "Epoch 37/90 - Train Loss: 51.9238\n",
      "Per Task Accuracy: {0: 0.8143150410574804, 1: 0.5231221257026061, 2: 0.45160041300980897, 3: 0.18806246691371095}\n",
      "Validation Loss: 298.5175 - Validation Accuracy: 0.5840\n",
      "---------------\n",
      "Epoch 38/90\n",
      "---------------\n",
      "Epoch 38/90 - Train Loss: 49.7033\n",
      "Per Task Accuracy: {0: 0.8555978369717605, 1: 0.41568727644353604, 2: 0.5558853897780073, 3: 0.08933298041291689}\n",
      "Validation Loss: 295.9441 - Validation Accuracy: 0.5882\n",
      "---------------\n",
      "Epoch 39/90\n",
      "---------------\n",
      "Epoch 39/90 - Train Loss: 48.4628\n",
      "Per Task Accuracy: {0: 0.8279341077508512, 1: 0.457843638221768, 2: 0.5729220443985544, 3: 0.19600317628374803}\n",
      "Validation Loss: 299.7495 - Validation Accuracy: 0.5852\n",
      "---------------\n",
      "Epoch 40/90\n",
      "---------------\n",
      "Epoch 40/90 - Train Loss: 46.1159\n",
      "Per Task Accuracy: {0: 0.8209242940116163, 1: 0.4938681655595299, 2: 0.535234899328859, 3: 0.25555849655902596}\n",
      "Validation Loss: 297.1501 - Validation Accuracy: 0.5852\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 41/90\n",
      "---------------\n",
      "Epoch 41/90 - Train Loss: 45.9549\n",
      "Per Task Accuracy: {0: 0.7828710194271981, 1: 0.5315533980582524, 2: 0.6560402684563759, 3: 0.17323980942297512}\n",
      "Validation Loss: 307.7918 - Validation Accuracy: 0.5749\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 42/90\n",
      "---------------\n",
      "Epoch 42/90 - Train Loss: 48.8835\n",
      "Per Task Accuracy: {0: 0.8968305627879031, 1: 0.40585079202861524, 2: 0.3927465152297367, 3: 0.17151932239280043}\n",
      "Validation Loss: 286.1973 - Validation Accuracy: 0.5955\n",
      "---------------\n",
      "Epoch 43/90\n",
      "---------------\n",
      "Epoch 43/90 - Train Loss: 45.2924\n",
      "Per Task Accuracy: {0: 0.8297366312837973, 1: 0.5318088911599387, 2: 0.5409137842023748, 3: 0.2457649550026469}\n",
      "Validation Loss: 293.1254 - Validation Accuracy: 0.5922\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 44/90\n",
      "---------------\n",
      "Epoch 44/90 - Train Loss: 43.9042\n",
      "Per Task Accuracy: {0: 0.8261315842179051, 1: 0.4586101175268268, 2: 0.6189984512132163, 3: 0.2424563260984648}\n",
      "Validation Loss: 293.9931 - Validation Accuracy: 0.5931\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 45/90\n",
      "---------------\n",
      "Epoch 45/90 - Train Loss: 44.4101\n",
      "Per Task Accuracy: {0: 0.8403514920889245, 1: 0.4922074603985692, 2: 0.5353639648941663, 3: 0.2554261514028587}\n",
      "Validation Loss: 294.4195 - Validation Accuracy: 0.5903\n",
      "---------------\n",
      "Epoch 46/90\n",
      "---------------\n",
      "Epoch 46/90 - Train Loss: 48.3002\n",
      "Per Task Accuracy: {0: 0.8263318646104546, 1: 0.5198007153806847, 2: 0.5405265875064533, 3: 0.2566172578083642}\n",
      "Validation Loss: 295.8285 - Validation Accuracy: 0.5886\n",
      "---------------\n",
      "Epoch 47/90\n",
      "---------------\n",
      "Epoch 47/90 - Train Loss: 44.0186\n",
      "Per Task Accuracy: {0: 0.7976917684758662, 1: 0.44915687276443533, 2: 0.5998967475477542, 3: 0.3819481206987824}\n",
      "Validation Loss: 299.1737 - Validation Accuracy: 0.5837\n",
      "---------------\n",
      "Epoch 48/90\n",
      "---------------\n",
      "Epoch 48/90 - Train Loss: 46.6348\n",
      "Per Task Accuracy: {0: 0.6951482074904867, 1: 0.6845937659683189, 2: 0.6763035622096025, 3: 0.3120698782424563}\n",
      "Validation Loss: 316.8265 - Validation Accuracy: 0.5646\n",
      "---------------\n",
      "Epoch 49/90\n",
      "---------------\n",
      "Epoch 49/90 - Train Loss: 46.9849\n",
      "Per Task Accuracy: {0: 0.7864760664930903, 1: 0.5480327031170158, 2: 0.5223283427981414, 3: 0.28718898888300687}\n",
      "Validation Loss: 295.3936 - Validation Accuracy: 0.5845\n",
      "---------------\n",
      "Epoch 50/90\n",
      "---------------\n",
      "Epoch 50/90 - Train Loss: 46.8570\n",
      "Per Task Accuracy: {0: 0.8403514920889245, 1: 0.48863055697496166, 2: 0.5216830149716056, 3: 0.17787188988883007}\n",
      "Validation Loss: 293.1868 - Validation Accuracy: 0.5920\n",
      "---------------\n",
      "Epoch 51/90\n",
      "---------------\n",
      "Epoch 51/90 - Train Loss: 44.4991\n",
      "Per Task Accuracy: {0: 0.8026987782896055, 1: 0.5300204394481349, 2: 0.5438822922044398, 3: 0.31604023292747485}\n",
      "Validation Loss: 298.1715 - Validation Accuracy: 0.5855\n",
      "---------------\n",
      "Epoch 52/90\n",
      "---------------\n",
      "Epoch 52/90 - Train Loss: 43.2439\n",
      "Per Task Accuracy: {0: 0.8449579411175646, 1: 0.42769545222279, 2: 0.5304594734124936, 3: 0.35190577024880887}\n",
      "Validation Loss: 288.8579 - Validation Accuracy: 0.5986\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 53/90\n",
      "---------------\n",
      "Epoch 53/90 - Train Loss: 44.0722\n",
      "Per Task Accuracy: {0: 0.8493641097536552, 1: 0.45196729688298415, 2: 0.5170366546205473, 3: 0.34475913181577555}\n",
      "Validation Loss: 292.5872 - Validation Accuracy: 0.5988\n",
      "---------------\n",
      "Epoch 54/90\n",
      "---------------\n",
      "Epoch 54/90 - Train Loss: 43.5956\n",
      "Per Task Accuracy: {0: 0.8537702783897456, 1: 0.49987225344915687, 2: 0.49341765616933403, 3: 0.2668078348332451}\n",
      "Validation Loss: 289.9742 - Validation Accuracy: 0.5972\n",
      "---------------\n",
      "Epoch 55/90\n",
      "---------------\n",
      "Epoch 55/90 - Train Loss: 43.5186\n",
      "Per Task Accuracy: {0: 0.8629831764470258, 1: 0.47023505365355134, 2: 0.5496902426432628, 3: 0.24139756484912653}\n",
      "Validation Loss: 292.5727 - Validation Accuracy: 0.5963\n",
      "---------------\n",
      "Epoch 56/90\n",
      "---------------\n",
      "Epoch 56/90 - Train Loss: 43.6933\n",
      "Per Task Accuracy: {0: 0.8269327057881033, 1: 0.449795605518651, 2: 0.5285234899328859, 3: 0.37149285336156695}\n",
      "Validation Loss: 296.1582 - Validation Accuracy: 0.5922\n",
      "---------------\n",
      "Epoch 57/90\n",
      "---------------\n",
      "Epoch 57/90 - Train Loss: 42.6561\n",
      "Per Task Accuracy: {0: 0.7834718606048467, 1: 0.4887583035258048, 2: 0.6214506969540526, 3: 0.36315510852302807}\n",
      "Validation Loss: 303.7752 - Validation Accuracy: 0.5871\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 58/90\n",
      "---------------\n",
      "Epoch 58/90 - Train Loss: 47.2043\n",
      "Per Task Accuracy: {0: 0.7976917684758662, 1: 0.5237608584568216, 2: 0.6013164687661332, 3: 0.1336686077289571}\n",
      "Validation Loss: 304.7991 - Validation Accuracy: 0.5800\n",
      "---------------\n",
      "Epoch 59/90\n",
      "---------------\n",
      "Epoch 59/90 - Train Loss: 44.9463\n",
      "Per Task Accuracy: {0: 0.7908572000801122, 1: 0.540112416964742, 2: 0.5713732576148683, 3: 0.33509793541556376}\n",
      "Validation Loss: 302.9877 - Validation Accuracy: 0.5872\n",
      "---------------\n",
      "Epoch 60/90\n",
      "---------------\n",
      "Epoch 60/90 - Train Loss: 46.8724\n",
      "Per Task Accuracy: {0: 0.6213949529341077, 1: 0.7139754726622382, 2: 0.6406814661848219, 3: 0.33973001588141877}\n",
      "Validation Loss: 328.9053 - Validation Accuracy: 0.5477\n",
      "---------------\n",
      "Epoch 61/90\n",
      "---------------\n",
      "Epoch 61/90 - Train Loss: 46.3085\n",
      "Per Task Accuracy: {0: 0.79185860204286, 1: 0.48556463975472663, 2: 0.4753484770263294, 3: 0.3745367919534145}\n",
      "Validation Loss: 293.7724 - Validation Accuracy: 0.5869\n",
      "---------------\n",
      "Epoch 62/90\n",
      "---------------\n",
      "Epoch 62/90 - Train Loss: 49.1982\n",
      "Per Task Accuracy: {0: 0.7558081313839375, 1: 0.4807102708226878, 2: 0.4370160041300981, 3: 0.3238485971413446}\n",
      "Validation Loss: 299.9896 - Validation Accuracy: 0.5777\n",
      "---------------\n",
      "Epoch 63/90\n",
      "---------------\n",
      "Epoch 63/90 - Train Loss: 49.2169\n",
      "Per Task Accuracy: {0: 0.6847085920288404, 1: 0.6421819110884006, 2: 0.49380485286525555, 3: 0.4216516675489677}\n",
      "Validation Loss: 304.8522 - Validation Accuracy: 0.5735\n",
      "---------------\n",
      "Epoch 64/90\n",
      "---------------\n",
      "Epoch 64/90 - Train Loss: 49.7082\n",
      "Per Task Accuracy: {0: 0.7596134588423793, 1: 0.5314256515074093, 2: 0.4577955601445534, 3: 0.2820275277924828}\n",
      "Validation Loss: 300.1579 - Validation Accuracy: 0.5842\n",
      "---------------\n",
      "Epoch 65/90\n",
      "---------------\n",
      "Epoch 65/90 - Train Loss: 47.0025\n",
      "Per Task Accuracy: {0: 0.7768375726016423, 1: 0.39026571282575373, 2: 0.48825503355704697, 3: 0.41437268395976706}\n",
      "Validation Loss: 301.6949 - Validation Accuracy: 0.5849\n",
      "---------------\n",
      "Epoch 66/90\n",
      "---------------\n",
      "Epoch 66/90 - Train Loss: 46.1103\n",
      "Per Task Accuracy: {0: 0.8088824354095734, 1: 0.40891670924885026, 2: 0.49432111512648425, 3: 0.32371625198517734}\n",
      "Validation Loss: 298.9339 - Validation Accuracy: 0.5894\n",
      "---------------\n",
      "Epoch 67/90\n",
      "---------------\n",
      "Epoch 67/90 - Train Loss: 45.3353\n",
      "Per Task Accuracy: {0: 0.8227268175445623, 1: 0.43536024527337763, 2: 0.4522457408363449, 3: 0.3636844891476972}\n",
      "Validation Loss: 294.9300 - Validation Accuracy: 0.5889\n",
      "---------------\n",
      "Epoch 68/90\n",
      "---------------\n",
      "Epoch 68/90 - Train Loss: 42.2054\n",
      "Per Task Accuracy: {0: 0.8047015822151011, 1: 0.4667858967807869, 2: 0.4847702632937532, 3: 0.40140285865537323}\n",
      "Validation Loss: 295.2493 - Validation Accuracy: 0.5905\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 69/90\n",
      "---------------\n",
      "Epoch 69/90 - Train Loss: 44.4479\n",
      "Per Task Accuracy: {0: 0.6946975766072502, 1: 0.6347726111394992, 2: 0.5957666494579247, 3: 0.4318422445738486}\n",
      "Validation Loss: 310.6878 - Validation Accuracy: 0.5742\n",
      "---------------\n",
      "Epoch 70/90\n",
      "---------------\n",
      "Epoch 70/90 - Train Loss: 43.5843\n",
      "Per Task Accuracy: {0: 0.7814690566793511, 1: 0.48722534491568725, 2: 0.5151006711409396, 3: 0.4375330862890418}\n",
      "Validation Loss: 297.0637 - Validation Accuracy: 0.5872\n",
      "---------------\n",
      "Epoch 71/90\n",
      "---------------\n",
      "Epoch 71/90 - Train Loss: 41.5099\n",
      "Per Task Accuracy: {0: 0.7526286801522131, 1: 0.5026826775677057, 2: 0.5625967991739804, 3: 0.4587083112758073}\n",
      "Validation Loss: 300.8029 - Validation Accuracy: 0.5857\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 72/90\n",
      "---------------\n",
      "Epoch 72/90 - Train Loss: 42.3803\n",
      "Per Task Accuracy: {0: 0.8039004606449028, 1: 0.45975983648441493, 2: 0.47638100154878676, 3: 0.45354685018528323}\n",
      "Validation Loss: 294.5831 - Validation Accuracy: 0.5909\n",
      "---------------\n",
      "Epoch 73/90\n",
      "---------------\n",
      "Epoch 73/90 - Train Loss: 41.7211\n",
      "Per Task Accuracy: {0: 0.8050771079511316, 1: 0.4934849259070005, 2: 0.4940629839958699, 3: 0.42138697723663315}\n",
      "Validation Loss: 290.7301 - Validation Accuracy: 0.5926\n",
      "---------------\n",
      "Epoch 74/90\n",
      "---------------\n",
      "Epoch 74/90 - Train Loss: 41.7747\n",
      "Per Task Accuracy: {0: 0.8088824354095734, 1: 0.49412365866121616, 2: 0.5121321631388746, 3: 0.3413181577554262}\n",
      "Validation Loss: 298.4890 - Validation Accuracy: 0.5885\n",
      "---------------\n",
      "Epoch 75/90\n",
      "---------------\n",
      "Epoch 75/90 - Train Loss: 42.7240\n",
      "Per Task Accuracy: {0: 0.7710294412177048, 1: 0.5255493101686255, 2: 0.5973154362416108, 3: 0.3773160402329275}\n",
      "Validation Loss: 302.6602 - Validation Accuracy: 0.5828\n",
      "---------------\n",
      "Epoch 76/90\n",
      "---------------\n",
      "Epoch 76/90 - Train Loss: 43.7502\n",
      "Per Task Accuracy: {0: 0.7960895253354696, 1: 0.5816300459887583, 2: 0.3963603510583376, 3: 0.3406564319745897}\n",
      "Validation Loss: 295.6561 - Validation Accuracy: 0.5874\n",
      "---------------\n",
      "Epoch 77/90\n",
      "---------------\n",
      "Epoch 77/90 - Train Loss: 41.4234\n",
      "Per Task Accuracy: {0: 0.7702283196475065, 1: 0.48965252938170667, 2: 0.5611770779556015, 3: 0.5164107993647432}\n",
      "Validation Loss: 299.3433 - Validation Accuracy: 0.5895\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 78/90\n",
      "---------------\n",
      "Epoch 78/90 - Train Loss: 41.7673\n",
      "Per Task Accuracy: {0: 0.7914580412577609, 1: 0.41402657128257536, 2: 0.5405265875064533, 3: 0.44587083112758075}\n",
      "Validation Loss: 296.7774 - Validation Accuracy: 0.5958\n",
      "---------------\n",
      "Epoch 79/90\n",
      "---------------\n",
      "Epoch 79/90 - Train Loss: 40.8859\n",
      "Per Task Accuracy: {0: 0.8281343881434008, 1: 0.4512008175779254, 2: 0.5446566855962829, 3: 0.3315246161990471}\n",
      "Validation Loss: 293.6831 - Validation Accuracy: 0.5932\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 80/90\n",
      "---------------\n",
      "Epoch 80/90 - Train Loss: 42.7600\n",
      "Per Task Accuracy: {0: 0.81451532145003, 1: 0.47164026571282575, 2: 0.5327826535880227, 3: 0.4403123345685548}\n",
      "Validation Loss: 295.1096 - Validation Accuracy: 0.5958\n",
      "---------------\n",
      "Epoch 81/90\n",
      "---------------\n",
      "Epoch 81/90 - Train Loss: 40.7411\n",
      "Per Task Accuracy: {0: 0.7530042058882436, 1: 0.4873530914665304, 2: 0.5797625193598348, 3: 0.4974854420328216}\n",
      "Validation Loss: 304.2359 - Validation Accuracy: 0.5860\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 82/90\n",
      "---------------\n",
      "Epoch 82/90 - Train Loss: 40.1942\n",
      "Per Task Accuracy: {0: 0.8026987782896055, 1: 0.4215636177823199, 2: 0.5295560144553433, 3: 0.48676548438327155}\n",
      "Validation Loss: 297.7910 - Validation Accuracy: 0.5926\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 83/90\n",
      "---------------\n",
      "Epoch 83/90 - Train Loss: 41.0726\n",
      "Per Task Accuracy: {0: 0.8225265371520128, 1: 0.391543178334185, 2: 0.5289106866288075, 3: 0.44256220222339865}\n",
      "Validation Loss: 292.9480 - Validation Accuracy: 0.5965\n",
      "---------------\n",
      "Epoch 84/90\n",
      "---------------\n",
      "Epoch 84/90 - Train Loss: 39.3445\n",
      "Per Task Accuracy: {0: 0.792684758662127, 1: 0.43983137455288707, 2: 0.5290397521941146, 3: 0.5214399152991}\n",
      "Validation Loss: 298.6027 - Validation Accuracy: 0.5960\n",
      "Loss decreased, updating the model...\n",
      "---------------\n",
      "Epoch 85/90\n",
      "---------------\n",
      "Epoch 85/90 - Train Loss: 39.4029\n",
      "Per Task Accuracy: {0: 0.7483977568596034, 1: 0.5074092999489014, 2: 0.6064790913784203, 3: 0.5775542615140286}\n",
      "Validation Loss: 299.6944 - Validation Accuracy: 0.5895\n",
      "---------------\n",
      "Epoch 86/90\n",
      "---------------\n",
      "Epoch 86/90 - Train Loss: 41.6566\n",
      "Per Task Accuracy: {0: 0.8313388744241939, 1: 0.3226877874297394, 2: 0.48554465668559627, 3: 0.4690312334568555}\n",
      "Validation Loss: 293.0570 - Validation Accuracy: 0.5948\n",
      "---------------\n",
      "Epoch 87/90\n",
      "---------------\n",
      "Epoch 87/90 - Train Loss: 39.7219\n",
      "Per Task Accuracy: {0: 0.8129130783096334, 1: 0.37544711292795097, 2: 0.5160041300980899, 3: 0.5194547379565908}\n",
      "Validation Loss: 295.6767 - Validation Accuracy: 0.5952\n",
      "---------------\n",
      "Epoch 88/90\n",
      "---------------\n",
      "Epoch 88/90 - Train Loss: 39.8223\n",
      "Per Task Accuracy: {0: 0.8251301822551572, 1: 0.42565150740929997, 2: 0.5135518843572535, 3: 0.45566437268395976}\n",
      "Validation Loss: 291.5259 - Validation Accuracy: 0.5943\n",
      "---------------\n",
      "Epoch 89/90\n",
      "---------------\n",
      "Epoch 89/90 - Train Loss: 42.8787\n",
      "Per Task Accuracy: {0: 0.8613809333066292, 1: 0.4223300970873786, 2: 0.4645069695405266, 3: 0.36381683430386447}\n",
      "Validation Loss: 294.8237 - Validation Accuracy: 0.5908\n",
      "---------------\n",
      "Epoch 90/90\n",
      "---------------\n",
      "Epoch 90/90 - Train Loss: 44.4698\n",
      "Per Task Accuracy: {0: 0.8181203685159223, 1: 0.5682166581502299, 2: 0.5389778007227671, 3: 0.20235574377977766}\n",
      "Validation Loss: 304.5449 - Validation Accuracy: 0.5857\n"
     ]
    }
   ],
   "source": [
    "# Train the model on the combined (all-task) train/val loaders; train_model returns the trained model.\n",
    "# NOTE(review): use_weighted_loss=False — weight_label is passed but presumably unused in this run; confirm in train_model.\n",
    "model.to(device)\n",
    "model=train_model(device, model, \n",
    "            task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \n",
    "            criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \n",
    "            num_epochs=num_epochs,use_weighted_loss=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loss on test set: 2.0168\n",
      "Accuracy on test set: 0.5994\n",
      "Loss on test set: 1.6552\n",
      "Accuracy on test set: 0.5620\n",
      "Loss on test set: 1.3633\n",
      "Accuracy on test set: 0.5900\n",
      "Loss on test set: 1.6538\n",
      "Accuracy on test set: 0.4680\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the model separately on each task's held-out test loader.\n",
    "# Replaces four copy-pasted evaluate/print blocks (indices 0-3) with one loop;\n",
    "# output per loader is identical, and this generalizes to any number of tasks.\n",
    "for task_loader in test_loaders:\n",
    "    loss, accuracy = evaluate_model(device, label_mapping, model, {'val': task_loader})\n",
    "    print(f'Loss on test set: {loss:.4f}')\n",
    "    print(f'Accuracy on test set: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loss on test set: 305.8217\n",
      "Accuracy on test set: 0.5857\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the model on the full combined test set.\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader}, criterion)\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "# Save the model checkpoint.\n",
    "# Format the accuracy to three decimal places for use in the file name.\n",
    "accuracy_str = f\"{accuracy:.3f}\"\n",
    "current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "# Embed task id, accuracy and timestamp in the checkpoint file name.\n",
    "file_name = f\"cascade_tsk_{str(tsk)}_P1_{accuracy_str}_{current_time}.pth\"\n",
    "# exist_ok=True already tolerates a pre-existing directory, so the former\n",
    "# os.path.exists() guard was redundant (and a TOCTOU race); call makedirs directly.\n",
    "os.makedirs(model_save_pth, exist_ok=True)\n",
    "# Persist only the weights (state_dict), not the full module object.\n",
    "torch.save(model.state_dict(), os.path.join(model_save_pth, file_name))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "dl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
