{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import ConcatDataset,DataLoader\n",
    "from torch import nn, optim\n",
    "import torch\n",
    "import datetime\n",
    "from tqdm.notebook import tqdm\n",
    "import os\n",
    "#from cascade_lib import *\n",
    "from cascade_libary.network import create_ArcMultiTaskModel\n",
    "from cascade_libary.dataset import create_exampler_datasets_and_loaders, create_task_datasets_and_loaders,MyDataset,transform_test,transform_train\n",
    "from cascade_libary.train_eval import train_model, evaluate_model\n",
    "from cascade_libary.criterian import get_class_stats,sort_and_complete_alpha_by_label_mapping,MultiClassFocalLossWithAlpha\n",
    "from cascade_libary.utils import freeze_layers\n",
    "from torchvision import datasets as torchvision_datasets\n",
    "import numpy as np\n",
    "import itertools\n",
    "from kornia.losses import FocalLoss\n",
    "import math\n",
    "import torch.nn.functional as F\n",
    "import random\n",
    "\n",
    "#from kornia.losses import FocalLoss\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "init_freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    #'avgpool',\n",
    "    #'fc',\n",
    "    ]\n",
    "    #freeze_prefix.append('arc_classifiers.' + str(tsk))\n",
    "\n",
    "    0.16 \n",
    "\n",
    "    46.97\n",
    "\n",
    "\n",
    "    0.25\n",
    "\n",
    "    0.472\n",
    "        gamma 1,0 \n",
    "        0.422\n",
    "\n",
    "\n",
    "init_freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'avgpool',\n",
    "    #'fc',\n",
    "    ]\n",
    "weight_label=0.5+((tsk-1)*0.25)\n",
    "\n",
    "不冻arc cls\n",
    "\n",
    "gamma 0，0\n",
    "\n",
    "0.485"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def reweight_alphas(task_class_orders, tsk,alpha_cls, alpha_tsk,w_cls, w_tsk):\n",
    "    \"\"\"\n",
    "    更新alpha_cls和alpha_tsk的函数。\n",
    "    \n",
    "    参数:\n",
    "    task_class_orders -- 任务中类别的顺序列表。\n",
    "    tsk -- 我们关注的是第tsk个任务（索引从1开始）。\n",
    "    w_cls -- alpha_cls需要乘以的权重。\n",
    "    w_tsk -- alpha_tsk需要乘以的权重。\n",
    "    \n",
    "    返回:\n",
    "    更新后的alpha_cls和alpha_tsk列表。\n",
    "    \"\"\"\n",
    "    # 计算第tsk个任务之前的所有任务的类别总数\n",
    "\n",
    "    num_prev_task_classes = sum(len(task) for task in task_class_orders[:tsk])\n",
    "    print(\"num_prev_task_classes:\", num_prev_task_classes)\n",
    "    # 更新alpha_cls\n",
    "    for i in range(num_prev_task_classes):\n",
    "        alpha_cls[i] *= w_cls\n",
    "    \n",
    "    # 更新alpha_tsk\n",
    "    for i in range(tsk):\n",
    "        alpha_tsk[i] *= w_tsk\n",
    "    \n",
    "    return alpha_cls, alpha_tsk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置随机种子\n",
    "def set_seed(seed_value=42):\n",
    "    random.seed(seed_value)  # Python内置的随机模块\n",
    "    np.random.seed(seed_value)  # NumPy的随机数生成器\n",
    "    '''torch.manual_seed(seed_value)  # PyTorch的随机数生成器\n",
    "    # 如果使用CUDA\n",
    "    torch.cuda.manual_seed_all(seed_value)  # 为所有CUDA设备设置随机种子\n",
    "    torch.backends.cudnn.deterministic = True  # 设置确定性卷积\n",
    "    torch.backends.cudnn.benchmark = False  # 为了确保确定性，关闭 cudnn 基准测试'''\n",
    "# 调用函数设置随机种子\n",
    "set_seed(42)\n",
    "\n",
    "gpu=2\n",
    "if torch.cuda.is_available():\n",
    "        torch.cuda.set_device(gpu)\n",
    "        device = 'cuda'\n",
    "else:\n",
    "        print('WARNING: [CUDA unavailable] Using CPU instead!')\n",
    "        device = 'cpu'\n",
    "#device='cpu'\n",
    "model_save_pth='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade/loop'\n",
    "\n",
    "s=12.5\n",
    "m=0.0"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 初始化"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# 在函数外面加载数据集\n",
    "root_dir = '../data'\n",
    "train_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=True, download=True)\n",
    "test_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=False, download=True)\n",
    "\n",
    "# 随机化类别顺序\n",
    "class_order = list(range(100))\n",
    "random.shuffle(class_order)\n",
    "label_mapping = {original_label: new_label for new_label, original_label in enumerate(class_order)}\n",
    "# 定义每个任务的类别数量列表\n",
    "num_classes_per_task = [50, 5, 5, 5, 5, 5,5,5,5,5,5]\n",
    "\n",
    "batch_size = 64\n",
    "# 调用函数创建任务数据集和数据加载器\n",
    "# task_class_orders包含了随机属性\n",
    "origin_train_set, origin_test_set, train_loaders, test_loaders,task_class_orders = create_task_datasets_and_loaders(train_dataset, test_dataset, num_classes_per_task,class_order, batch_size=batch_size, transform_train=transform_train, transform_test=transform_test)\n",
    "\n",
    "# 假设你已经定义了origin_train_set和origin_test_set\n",
    "# 以及数据转换transform_train和transform_test\n",
    "n_samples_per_class = 20\n",
    "exampler_train_sets, exampler_test_sets, exampler_train_loaders, exampler_test_test_loaders = create_exampler_datasets_and_loaders(origin_train_set, origin_test_set, n_samples_per_class, batch_size, transform_train, transform_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "设置标签转换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 确定原始标签的范围（即最大的原始标签值）\n",
    "max_label = max(label_mapping.keys())\n",
    "\n",
    "# 创建一个长度等于原始标签范围的零数组\n",
    "label_mapping_array = np.zeros(max_label + 1, dtype=int)\n",
    "\n",
    "# 使用 label_mapping 字典填充这个数组\n",
    "for original_label, new_label in label_mapping.items():\n",
    "    label_mapping_array[original_label] = new_label\n",
    "\n",
    "# 将这个数组转换为 PyTorch 张量\n",
    "label_mapping_tensor = torch.from_numpy(label_mapping_array).to(device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = create_ArcMultiTaskModel('resnet34',512,None)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 1\n",
      "Incremental Task Layer added, current len: 1\n",
      "模型已从 /home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_0_0.734_20241219_122057.pth 成功加载。\n"
     ]
    }
   ],
   "source": [
    "tsk=1\n",
    "# 重新初始化模型并加载预训练参数\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=0.5)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "\n",
    "\n",
    "full_path='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_0_0.734_20241219_122057.pth'\n",
    "# 加载模型\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True))\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 2\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental Task Layer added, current len: 2\n",
      "tsk: 1\n",
      "alpha_cls: [174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      "   6.999951     6.999951     6.999951     6.999951     6.999951  ]\n",
      "alpha_tsk: [3.49998775 1.39999804]\n",
      "num_prev_task_classes: 50\n",
      "alpha_cls: [174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      "   6.999951     6.999951     6.999951     6.999951     6.999951  ]\n",
      "alpha_tsk: [228.14190917  91.2569553 ]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b5eeae9bf16c48beaaa3bc11aa429b1f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1650 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 114.5178\n",
      "Per Task Accuracy: {0: 0.9693570999399159, 1: 0.25523760858456823}\n",
      "Validation Loss: 262.0466 - Validation Accuracy: 0.6284\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 79.1926\n",
      "Per Task Accuracy: {0: 0.9797716803524935, 1: 0.2162749105774144}\n",
      "Validation Loss: 250.9553 - Validation Accuracy: 0.6571\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 72.8476\n",
      "Per Task Accuracy: {0: 0.9751652313238534, 1: 0.2622636688809402}\n",
      "Validation Loss: 272.5158 - Validation Accuracy: 0.6395\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 70.3603\n",
      "Per Task Accuracy: {0: 0.9709593430803124, 1: 0.28781297904956565}\n",
      "Validation Loss: 274.7924 - Validation Accuracy: 0.6355\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 70.2244\n",
      "Per Task Accuracy: {0: 0.9859803725215301, 1: 0.17795094532447625}\n",
      "Validation Loss: 324.5172 - Validation Accuracy: 0.6035\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 78.2004\n",
      "Per Task Accuracy: {0: 0.9473012217103945, 1: 0.2622636688809402}\n",
      "Validation Loss: 316.7748 - Validation Accuracy: 0.6075\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 70.2026\n",
      "Per Task Accuracy: {0: 0.9052173042259163, 1: 0.40508431272355644}\n",
      "Validation Loss: 311.6994 - Validation Accuracy: 0.6013\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 67.4502\n",
      "Per Task Accuracy: {0: 0.9218656118565992, 1: 0.2944557996934083}\n",
      "Validation Loss: 333.6871 - Validation Accuracy: 0.6040\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n",
      "Epoch 9/30 - Train Loss: 65.8889\n",
      "Per Task Accuracy: {0: 0.9370869216903666, 1: 0.3335462442514052}\n",
      "Validation Loss: 299.1695 - Validation Accuracy: 0.6233\n",
      "---------------\n",
      "Epoch 10/30\n",
      "---------------\n",
      "Epoch 10/30 - Train Loss: 59.6070\n",
      "Per Task Accuracy: {0: 0.9278740236330864, 1: 0.397547266223812}\n",
      "Validation Loss: 313.4339 - Validation Accuracy: 0.6109\n",
      "---------------\n",
      "Epoch 11/30\n",
      "---------------\n",
      "Epoch 11/30 - Train Loss: 54.8503\n",
      "Per Task Accuracy: {0: 0.9146555177248148, 1: 0.4239908022483393}\n",
      "Validation Loss: 325.4056 - Validation Accuracy: 0.6005\n",
      "---------------\n",
      "Epoch 12/30\n",
      "---------------\n",
      "Epoch 12/30 - Train Loss: 61.9616\n",
      "Per Task Accuracy: {0: 0.938689164830763, 1: 0.32907511497189573}\n",
      "Validation Loss: 341.5166 - Validation Accuracy: 0.5965\n",
      "---------------\n",
      "Epoch 13/30\n",
      "---------------\n",
      "Epoch 13/30 - Train Loss: 63.1371\n",
      "Per Task Accuracy: {0: 0.9502803925495694, 1: 0.3110628513030148}\n",
      "Validation Loss: 391.2401 - Validation Accuracy: 0.5642\n",
      "---------------\n",
      "Epoch 14/30\n",
      "---------------\n",
      "Epoch 14/30 - Train Loss: 75.1050\n",
      "Per Task Accuracy: {0: 0.9737632685760064, 1: 0.2584312723556464}\n",
      "Validation Loss: 340.5836 - Validation Accuracy: 0.5829\n",
      "---------------\n",
      "Epoch 15/30\n",
      "---------------\n",
      "Epoch 15/30 - Train Loss: 62.6829\n",
      "Per Task Accuracy: {0: 0.9679301021430002, 1: 0.31974961676034747}\n",
      "Validation Loss: 326.5434 - Validation Accuracy: 0.6051\n",
      "---------------\n",
      "Epoch 16/30\n",
      "---------------\n",
      "Epoch 16/30 - Train Loss: 57.5643\n",
      "Per Task Accuracy: {0: 0.9521079511315842, 1: 0.3718702095043434}\n",
      "Validation Loss: 325.6602 - Validation Accuracy: 0.6153\n",
      "---------------\n",
      "Epoch 17/30\n",
      "---------------\n",
      "Epoch 17/30 - Train Loss: 53.4810\n",
      "Per Task Accuracy: {0: 0.960945323452834, 1: 0.3940981093510475}\n",
      "Validation Loss: 346.2818 - Validation Accuracy: 0.5980\n",
      "---------------\n",
      "Epoch 18/30\n",
      "---------------\n",
      "Epoch 18/30 - Train Loss: 52.2110\n",
      "Per Task Accuracy: {0: 0.9370869216903666, 1: 0.46039856923863054}\n",
      "Validation Loss: 358.1994 - Validation Accuracy: 0.6015\n",
      "---------------\n",
      "Epoch 19/30\n",
      "---------------\n",
      "Epoch 19/30 - Train Loss: 53.9853\n",
      "Per Task Accuracy: {0: 0.9675545764069697, 1: 0.35973428717424627}\n",
      "Validation Loss: 349.1515 - Validation Accuracy: 0.5969\n",
      "---------------\n",
      "Epoch 20/30\n",
      "---------------\n",
      "Epoch 20/30 - Train Loss: 49.8690\n",
      "Per Task Accuracy: {0: 0.93548467854997, 1: 0.5042156361778232}\n",
      "Validation Loss: 355.0964 - Validation Accuracy: 0.5971\n",
      "---------------\n",
      "Epoch 21/30\n",
      "---------------\n",
      "Epoch 21/30 - Train Loss: 48.5898\n",
      "Per Task Accuracy: {0: 0.9411175645904266, 1: 0.515074092999489}\n",
      "Validation Loss: 352.3222 - Validation Accuracy: 0.5947\n",
      "---------------\n",
      "Epoch 22/30\n",
      "---------------\n",
      "Epoch 22/30 - Train Loss: 46.4396\n",
      "Per Task Accuracy: {0: 0.9264970959343081, 1: 0.5880173735309147}\n",
      "Validation Loss: 361.0923 - Validation Accuracy: 0.6095\n",
      "---------------\n",
      "Epoch 23/30\n",
      "---------------\n",
      "Epoch 23/30 - Train Loss: 46.3931\n",
      "Per Task Accuracy: {0: 0.9198878429801722, 1: 0.5249105774144098}\n",
      "Validation Loss: 380.2344 - Validation Accuracy: 0.5898\n",
      "---------------\n",
      "Epoch 24/30\n",
      "---------------\n",
      "Epoch 24/30 - Train Loss: 48.6618\n",
      "Per Task Accuracy: {0: 0.9411175645904266, 1: 0.5093254982115483}\n",
      "Validation Loss: 371.5315 - Validation Accuracy: 0.6009\n",
      "---------------\n",
      "Epoch 25/30\n",
      "---------------\n",
      "Epoch 25/30 - Train Loss: 51.6097\n",
      "Per Task Accuracy: {0: 0.9200630883236531, 1: 0.5711548288196219}\n",
      "Validation Loss: 398.8636 - Validation Accuracy: 0.5720\n",
      "---------------\n",
      "Epoch 26/30\n",
      "---------------\n",
      "Epoch 26/30 - Train Loss: 51.2388\n",
      "Per Task Accuracy: {0: 0.9447226116563189, 1: 0.5053653551354114}\n",
      "Validation Loss: 387.2994 - Validation Accuracy: 0.5898\n",
      "---------------\n",
      "Epoch 27/30\n",
      "---------------\n",
      "Epoch 27/30 - Train Loss: 46.9618\n",
      "Per Task Accuracy: {0: 0.9351091528139395, 1: 0.547138477261114}\n",
      "Validation Loss: 384.9983 - Validation Accuracy: 0.5873\n",
      "---------------\n",
      "Epoch 28/30\n",
      "---------------\n",
      "Epoch 28/30 - Train Loss: 46.1042\n",
      "Per Task Accuracy: {0: 0.9523332665732025, 1: 0.46116504854368934}\n",
      "Validation Loss: 358.2869 - Validation Accuracy: 0.6035\n",
      "---------------\n",
      "Epoch 29/30\n",
      "---------------\n",
      "Epoch 29/30 - Train Loss: 45.8570\n",
      "Per Task Accuracy: {0: 0.9260965351492089, 1: 0.5775421563617782}\n",
      "Validation Loss: 364.9083 - Validation Accuracy: 0.5985\n",
      "---------------\n",
      "Epoch 30/30\n",
      "---------------\n",
      "Epoch 30/30 - Train Loss: 43.6138\n",
      "Per Task Accuracy: {0: 0.9062437412377329, 1: 0.6862544711292795}\n",
      "Validation Loss: 364.1487 - Validation Accuracy: 0.5982\n",
      "Loss on test set: 364.4087\n",
      "Accuracy on test set: 0.5982\n",
      "Incremental ARC Classifier added, current len: 3\n",
      "Incremental Task Layer added, current len: 3\n",
      "tsk: 2\n",
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 3.59998704 35.99870405  1.43999793]\n",
      "num_prev_task_classes: 55\n",
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 43.57236687 435.7095518   17.4289844 ]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "74797a88cbad4d9aa354ebc3a23c7c16",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1710 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 153.0036\n",
      "Per Task Accuracy: {0: 0.774884838774284, 1: 0.22023505365355137, 2: 0.3926174496644295}\n",
      "Validation Loss: 482.4730 - Validation Accuracy: 0.4242\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 82.7901\n",
      "Per Task Accuracy: {0: 0.9068696174644503, 1: 0.1702861522738886, 2: 0.24651522973670625}\n",
      "Validation Loss: 424.4101 - Validation Accuracy: 0.5002\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 65.2127\n",
      "Per Task Accuracy: {0: 0.9431203685159223, 1: 0.18254982115482882, 2: 0.1390036138358286}\n",
      "Validation Loss: 397.4258 - Validation Accuracy: 0.5388\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 52.5736\n",
      "Per Task Accuracy: {0: 0.9254956939715602, 1: 0.307230454777721, 2: 0.2268972638100155}\n",
      "Validation Loss: 396.7941 - Validation Accuracy: 0.5488\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 46.4762\n",
      "Per Task Accuracy: {0: 0.9317043861405968, 1: 0.3640776699029126, 2: 0.18701600413009808}\n",
      "Validation Loss: 402.1072 - Validation Accuracy: 0.5498\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 42.8222\n",
      "Per Task Accuracy: {0: 0.9196875625876226, 1: 0.45464997445068983, 2: 0.18546721734641197}\n",
      "Validation Loss: 406.8081 - Validation Accuracy: 0.5632\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 44.5771\n",
      "Per Task Accuracy: {0: 0.9547366312837973, 1: 0.37544711292795097, 2: 0.12093443469282396}\n",
      "Validation Loss: 412.2829 - Validation Accuracy: 0.5620\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 52.2803\n",
      "Per Task Accuracy: {0: 0.9463248547967154, 1: 0.3893714869698518, 2: 0.05059370160041301}\n",
      "Validation Loss: 434.9586 - Validation Accuracy: 0.5385\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n",
      "Epoch 9/30 - Train Loss: 44.1282\n",
      "Per Task Accuracy: {0: 0.9407170038053274, 1: 0.3530914665304037, 2: 0.09499225606608157}\n",
      "Validation Loss: 447.9520 - Validation Accuracy: 0.5363\n",
      "---------------\n",
      "Epoch 10/30\n",
      "---------------\n",
      "Epoch 10/30 - Train Loss: 50.0655\n",
      "Per Task Accuracy: {0: 0.9262968155417585, 1: 0.471895758814512, 2: 0.1223541559112029}\n",
      "Validation Loss: 466.7612 - Validation Accuracy: 0.5263\n",
      "---------------\n",
      "Epoch 11/30\n",
      "---------------\n",
      "Epoch 11/30 - Train Loss: 48.2152\n",
      "Per Task Accuracy: {0: 0.9557380332465452, 1: 0.40355135411343895, 2: 0.10583376355188436}\n",
      "Validation Loss: 426.4359 - Validation Accuracy: 0.5457\n",
      "---------------\n",
      "Epoch 12/30\n",
      "---------------\n",
      "Epoch 12/30 - Train Loss: 44.6658\n",
      "Per Task Accuracy: {0: 0.9763669136791508, 1: 0.29330608073582015, 2: 0.10325245224574084}\n",
      "Validation Loss: 448.0020 - Validation Accuracy: 0.5415\n",
      "---------------\n",
      "Epoch 13/30\n",
      "---------------\n",
      "Epoch 13/30 - Train Loss: 40.7718\n",
      "Per Task Accuracy: {0: 0.95814139795714, 1: 0.41734798160449665, 2: 0.11925658234383067}\n",
      "Validation Loss: 430.9402 - Validation Accuracy: 0.5582\n",
      "---------------\n",
      "Epoch 14/30\n",
      "---------------\n",
      "Epoch 14/30 - Train Loss: 36.3418\n",
      "Per Task Accuracy: {0: 0.9823753254556379, 1: 0.3237097598364844, 2: 0.1233866804336603}\n",
      "Validation Loss: 419.2284 - Validation Accuracy: 0.5717\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 15/30\n",
      "---------------\n",
      "Epoch 15/30 - Train Loss: 34.7659\n",
      "Per Task Accuracy: {0: 0.9743641097536552, 1: 0.37966274910577413, 2: 0.14197212183789365}\n",
      "Validation Loss: 432.4299 - Validation Accuracy: 0.5702\n",
      "---------------\n",
      "Epoch 16/30\n",
      "---------------\n",
      "Epoch 16/30 - Train Loss: 33.8825\n",
      "Per Task Accuracy: {0: 0.9643500901261767, 1: 0.4243740419008687, 2: 0.14248838409912234}\n",
      "Validation Loss: 450.6517 - Validation Accuracy: 0.5603\n",
      "---------------\n",
      "Epoch 17/30\n",
      "---------------\n",
      "Epoch 17/30 - Train Loss: 36.3736\n",
      "Per Task Accuracy: {0: 0.9479270979371119, 1: 0.4253960143076137, 2: 0.05162622612287042}\n",
      "Validation Loss: 481.4159 - Validation Accuracy: 0.5257\n",
      "---------------\n",
      "Epoch 18/30\n",
      "---------------\n",
      "Epoch 18/30 - Train Loss: 41.8907\n",
      "Per Task Accuracy: {0: 0.9679551371920689, 1: 0.39984670413898826, 2: 0.08931337119256583}\n",
      "Validation Loss: 459.6172 - Validation Accuracy: 0.5498\n",
      "---------------\n",
      "Epoch 19/30\n",
      "---------------\n",
      "Epoch 19/30 - Train Loss: 36.2498\n",
      "Per Task Accuracy: {0: 0.9775685960344482, 1: 0.33456821665815023, 2: 0.11564274651522974}\n",
      "Validation Loss: 455.5541 - Validation Accuracy: 0.5505\n",
      "---------------\n",
      "Epoch 20/30\n",
      "---------------\n",
      "Epoch 20/30 - Train Loss: 32.9733\n",
      "Per Task Accuracy: {0: 0.9649509313038254, 1: 0.4311446090955544, 2: 0.11822405782137325}\n",
      "Validation Loss: 448.1405 - Validation Accuracy: 0.5638\n",
      "---------------\n",
      "Epoch 21/30\n",
      "---------------\n",
      "Epoch 21/30 - Train Loss: 32.7561\n",
      "Per Task Accuracy: {0: 0.9719607450430603, 1: 0.3460654062340317, 2: 0.14506969540526587}\n",
      "Validation Loss: 454.8261 - Validation Accuracy: 0.5595\n",
      "---------------\n",
      "Epoch 22/30\n",
      "---------------\n",
      "Epoch 22/30 - Train Loss: 32.1279\n",
      "Per Task Accuracy: {0: 0.9517324253955538, 1: 0.4573326520183955, 2: 0.1486835312338668}\n",
      "Validation Loss: 473.5457 - Validation Accuracy: 0.5532\n",
      "---------------\n",
      "Epoch 23/30\n",
      "---------------\n",
      "Epoch 23/30 - Train Loss: 31.6727\n",
      "Per Task Accuracy: {0: 0.9559383136390948, 1: 0.4361267245784364, 2: 0.1590087764584409}\n",
      "Validation Loss: 481.9383 - Validation Accuracy: 0.5440\n",
      "---------------\n",
      "Epoch 24/30\n",
      "---------------\n",
      "Epoch 24/30 - Train Loss: 32.8469\n",
      "Per Task Accuracy: {0: 0.9569397156018425, 1: 0.4464741951967297, 2: 0.146618482188952}\n",
      "Validation Loss: 481.4556 - Validation Accuracy: 0.5440\n",
      "---------------\n",
      "Epoch 25/30\n",
      "---------------\n",
      "Epoch 25/30 - Train Loss: 32.6907\n",
      "Per Task Accuracy: {0: 0.9549369116763469, 1: 0.5095809913132345, 2: 0.146618482188952}\n",
      "Validation Loss: 492.5573 - Validation Accuracy: 0.5438\n",
      "---------------\n",
      "Epoch 26/30\n",
      "---------------\n",
      "Epoch 26/30 - Train Loss: 31.3331\n",
      "Per Task Accuracy: {0: 0.9627478469857801, 1: 0.4538834951456311, 2: 0.16313887454827053}\n",
      "Validation Loss: 487.7535 - Validation Accuracy: 0.5480\n",
      "---------------\n",
      "Epoch 27/30\n",
      "---------------\n",
      "Epoch 27/30 - Train Loss: 33.6912\n",
      "Per Task Accuracy: {0: 0.9497296214700581, 1: 0.5238886050076648, 2: 0.1590087764584409}\n",
      "Validation Loss: 473.9710 - Validation Accuracy: 0.5437\n",
      "---------------\n",
      "Epoch 28/30\n",
      "---------------\n",
      "Epoch 28/30 - Train Loss: 34.3940\n",
      "Per Task Accuracy: {0: 0.9743641097536552, 1: 0.38758303525804805, 2: 0.06504904491481672}\n",
      "Validation Loss: 480.1967 - Validation Accuracy: 0.5465\n",
      "---------------\n",
      "Epoch 29/30\n",
      "---------------\n",
      "Epoch 29/30 - Train Loss: 36.0788\n",
      "Per Task Accuracy: {0: 0.972561586220709, 1: 0.3878385283597343, 2: 0.1362932369643779}\n",
      "Validation Loss: 506.2527 - Validation Accuracy: 0.5415\n",
      "---------------\n",
      "Epoch 30/30\n",
      "---------------\n",
      "Epoch 30/30 - Train Loss: 34.3686\n",
      "Per Task Accuracy: {0: 0.9717604646505107, 1: 0.3999744506898314, 2: 0.13061435209086217}\n",
      "Validation Loss: 496.3043 - Validation Accuracy: 0.5357\n",
      "Loss on test set: 497.6128\n",
      "Accuracy on test set: 0.5357\n",
      "Incremental ARC Classifier added, current len: 4\n",
      "Incremental Task Layer added, current len: 4\n",
      "tsk: 3\n",
      "alpha_cls: [184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      "   7.39994524   7.39994524   7.39994524   7.39994524   7.39994524]\n",
      "alpha_tsk: [ 3.69998631 36.99863105 36.99863105  1.47999781]\n",
      "num_prev_task_classes: 60\n",
      "alpha_cls: [184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      "   7.39994524   7.39994524   7.39994524   7.39994524   7.39994524]\n",
      "alpha_tsk: [ 32.02097104 320.19904778 320.19904778  12.80841685]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "fb67102cf79247749cb3e9378c12c097",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1740 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 137.2509\n",
      "Per Task Accuracy: {0: 0.9811736431003405, 1: 0.044583546244251405, 2: 0.0, 3: 0.0021175224986765486}\n",
      "Validation Loss: 464.9339 - Validation Accuracy: 0.4866\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 91.8206\n",
      "Per Task Accuracy: {0: 0.9905868215501702, 1: 0.08277976494634645, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 469.2513 - Validation Accuracy: 0.4989\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 76.8122\n",
      "Per Task Accuracy: {0: 0.9775685960344482, 1: 0.21448645886561063, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 473.4763 - Validation Accuracy: 0.5257\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 66.4192\n",
      "Per Task Accuracy: {0: 0.9759663528940516, 1: 0.27899846704138986, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 459.8269 - Validation Accuracy: 0.5483\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 60.2504\n",
      "Per Task Accuracy: {0: 0.9819747646705388, 1: 0.27082268778742974, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 472.4451 - Validation Accuracy: 0.5508\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 56.6123\n",
      "Per Task Accuracy: {0: 0.9795713999599439, 1: 0.2982881962187021, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 486.4861 - Validation Accuracy: 0.5534\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 55.2240\n",
      "Per Task Accuracy: {0: 0.9787702783897456, 1: 0.31029637199795607, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 492.8543 - Validation Accuracy: 0.5471\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 55.0070\n",
      "Per Task Accuracy: {0: 0.9761666332866012, 1: 0.29994890137966274, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 512.5999 - Validation Accuracy: 0.5491\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n",
      "Epoch 9/30 - Train Loss: 54.8870\n",
      "Per Task Accuracy: {0: 0.9847786901662328, 1: 0.26009197751660706, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 519.9876 - Validation Accuracy: 0.5469\n",
      "---------------\n",
      "Epoch 10/30\n",
      "---------------\n",
      "Epoch 10/30 - Train Loss: 55.6844\n",
      "Per Task Accuracy: {0: 0.9799719607450431, 1: 0.3053142565150741, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 512.4434 - Validation Accuracy: 0.5409\n",
      "---------------\n",
      "Epoch 11/30\n",
      "---------------\n",
      "Epoch 11/30 - Train Loss: 55.0830\n",
      "Per Task Accuracy: {0: 0.9837772882034849, 1: 0.2503832396525294, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 509.0727 - Validation Accuracy: 0.5537\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 12/30\n",
      "---------------\n",
      "Epoch 12/30 - Train Loss: 53.2679\n",
      "Per Task Accuracy: {0: 0.9769677548567995, 1: 0.34555442003065917, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 524.0490 - Validation Accuracy: 0.5460\n",
      "---------------\n",
      "Epoch 13/30\n",
      "---------------\n",
      "Epoch 13/30 - Train Loss: 52.2819\n",
      "Per Task Accuracy: {0: 0.9769677548567995, 1: 0.3620337250894226, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 534.6526 - Validation Accuracy: 0.5368\n",
      "---------------\n",
      "Epoch 14/30\n",
      "---------------\n",
      "Epoch 14/30 - Train Loss: 51.6387\n",
      "Per Task Accuracy: {0: 0.9785699979971961, 1: 0.33559018906489524, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 549.9266 - Validation Accuracy: 0.5348\n",
      "---------------\n",
      "Epoch 15/30\n",
      "---------------\n",
      "Epoch 15/30 - Train Loss: 53.8080\n",
      "Per Task Accuracy: {0: 0.9847786901662328, 1: 0.23684210526315788, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 557.3522 - Validation Accuracy: 0.5243\n",
      "---------------\n",
      "Epoch 16/30\n",
      "---------------\n",
      "Epoch 16/30 - Train Loss: 63.5015\n",
      "Per Task Accuracy: {0: 0.9871820548768275, 1: 0.19264179867143588, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 571.6910 - Validation Accuracy: 0.4878\n",
      "---------------\n",
      "Epoch 17/30\n",
      "---------------\n",
      "Epoch 17/30 - Train Loss: 64.8338\n",
      "Per Task Accuracy: {0: 0.9759663528940516, 1: 0.358456821665815, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 527.6037 - Validation Accuracy: 0.5291\n",
      "---------------\n",
      "Epoch 18/30\n",
      "---------------\n",
      "Epoch 18/30 - Train Loss: 59.8491\n",
      "Per Task Accuracy: {0: 0.9845784097736832, 1: 0.29560551865099643, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 546.2421 - Validation Accuracy: 0.5222\n",
      "---------------\n",
      "Epoch 19/30\n",
      "---------------\n",
      "Epoch 19/30 - Train Loss: 61.5780\n",
      "Per Task Accuracy: {0: 0.9893851391948728, 1: 0.24003576903423607, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 535.8083 - Validation Accuracy: 0.5089\n",
      "---------------\n",
      "Epoch 20/30\n",
      "---------------\n",
      "Epoch 20/30 - Train Loss: 60.1433\n",
      "Per Task Accuracy: {0: 0.9767674744642499, 1: 0.3799182422074604, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 524.6980 - Validation Accuracy: 0.5245\n",
      "---------------\n",
      "Epoch 21/30\n",
      "---------------\n",
      "Epoch 21/30 - Train Loss: 56.3003\n",
      "Per Task Accuracy: {0: 0.9791708391748448, 1: 0.3621614716402657, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 535.9786 - Validation Accuracy: 0.5315\n",
      "---------------\n",
      "Epoch 22/30\n",
      "---------------\n",
      "Epoch 22/30 - Train Loss: 52.5056\n",
      "Per Task Accuracy: {0: 0.9625475665932305, 1: 0.49374041900868676, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 568.4283 - Validation Accuracy: 0.5289\n",
      "---------------\n",
      "Epoch 23/30\n",
      "---------------\n",
      "Epoch 23/30 - Train Loss: 53.0557\n",
      "Per Task Accuracy: {0: 0.9837772882034849, 1: 0.30416453755748596, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 561.4506 - Validation Accuracy: 0.5220\n",
      "---------------\n",
      "Epoch 24/30\n",
      "---------------\n",
      "Epoch 24/30 - Train Loss: 51.9361\n",
      "Per Task Accuracy: {0: 0.9793711195673943, 1: 0.35194174757281554, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 554.9295 - Validation Accuracy: 0.5305\n",
      "---------------\n",
      "Epoch 25/30\n",
      "---------------\n",
      "Epoch 25/30 - Train Loss: 51.2090\n",
      "Per Task Accuracy: {0: 0.9889845784097737, 1: 0.32575370464997444, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 536.4765 - Validation Accuracy: 0.5320\n",
      "---------------\n",
      "Epoch 26/30\n",
      "---------------\n",
      "Epoch 26/30 - Train Loss: 52.3521\n",
      "Per Task Accuracy: {0: 0.9781694372120969, 1: 0.3978027593254982, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 541.4864 - Validation Accuracy: 0.5355\n",
      "---------------\n",
      "Epoch 27/30\n",
      "---------------\n",
      "Epoch 27/30 - Train Loss: 53.0984\n",
      "Per Task Accuracy: {0: 0.9837772882034849, 1: 0.3391670924885028, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 564.6702 - Validation Accuracy: 0.5192\n",
      "---------------\n",
      "Epoch 28/30\n",
      "---------------\n",
      "Epoch 28/30 - Train Loss: 55.1602\n",
      "Per Task Accuracy: {0: 0.9757660725015022, 1: 0.3331630045988758, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 588.8725 - Validation Accuracy: 0.5015\n",
      "---------------\n",
      "Epoch 29/30\n",
      "---------------\n",
      "Epoch 29/30 - Train Loss: 54.3093\n",
      "Per Task Accuracy: {0: 0.9755657921089526, 1: 0.34121103730199287, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 547.3594 - Validation Accuracy: 0.5314\n",
      "---------------\n",
      "Epoch 30/30\n",
      "---------------\n",
      "Epoch 30/30 - Train Loss: 52.9469\n",
      "Per Task Accuracy: {0: 0.9799719607450431, 1: 0.3484925907000511, 2: 0.0, 3: 0.0}\n",
      "Validation Loss: 540.3869 - Validation Accuracy: 0.5431\n",
      "Loss on test set: 542.7038\n",
      "Accuracy on test set: 0.5431\n",
      "Incremental ARC Classifier added, current len: 5\n",
      "Incremental Task Layer added, current len: 5\n",
      "tsk: 4\n",
      "alpha_cls: [189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      "   7.59994224   7.59994224   7.59994224   7.59994224   7.59994224]\n",
      "alpha_tsk: [ 3.79998556 37.99855605 37.99855605 37.99855605  1.51999769]\n",
      "num_prev_task_classes: 65\n",
      "alpha_cls: [189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      "   7.59994224   7.59994224   7.59994224   7.59994224   7.59994224]\n",
      "alpha_tsk: [ 28.17574613 281.74782555 281.74782555 281.74782555  11.27032415]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c92a2ed4fc154841bf77c14a90bfc6d0",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1800 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 149.7348\n",
      "Per Task Accuracy: {0: 0.9915882235129181, 1: 0.1852324987225345, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 519.4882 - Validation Accuracy: 0.4687\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 104.1298\n",
      "Per Task Accuracy: {0: 0.9935910274384138, 1: 0.19008686765457333, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 533.2449 - Validation Accuracy: 0.5031\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 93.0588\n",
      "Per Task Accuracy: {0: 0.9835770078109353, 1: 0.2984159427695452, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 532.2161 - Validation Accuracy: 0.5134\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 86.0280\n",
      "Per Task Accuracy: {0: 0.9715601842579611, 1: 0.3741696474195197, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 557.9821 - Validation Accuracy: 0.5176\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 85.9279\n",
      "Per Task Accuracy: {0: 0.9945924294011617, 1: 0.17271333673990802, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 599.2921 - Validation Accuracy: 0.5080\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 84.0807\n",
      "Per Task Accuracy: {0: 0.9791708391748448, 1: 0.35436893203883496, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 778.4477 - Validation Accuracy: 0.3280\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 81.7039\n",
      "Per Task Accuracy: {0: 0.9759663528940516, 1: 0.39026571282575373, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 578.6180 - Validation Accuracy: 0.5154\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 79.8482\n",
      "Per Task Accuracy: {0: 0.969757660725015, 1: 0.44213081246806335, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 589.9337 - Validation Accuracy: 0.5190\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n",
      "Epoch 9/30 - Train Loss: 80.3124\n",
      "Per Task Accuracy: {0: 0.9759663528940516, 1: 0.33635666836995404, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 599.1227 - Validation Accuracy: 0.5121\n",
      "---------------\n",
      "Epoch 10/30\n",
      "---------------\n",
      "Epoch 10/30 - Train Loss: 82.8685\n",
      "Per Task Accuracy: {0: 0.9729621470058082, 1: 0.3680378129790496, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 616.8641 - Validation Accuracy: 0.5030\n",
      "---------------\n",
      "Epoch 11/30\n",
      "---------------\n",
      "Epoch 11/30 - Train Loss: 83.6573\n",
      "Per Task Accuracy: {0: 0.9857800921289805, 1: 0.3138732754215636, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 608.6822 - Validation Accuracy: 0.5081\n",
      "---------------\n",
      "Epoch 12/30\n",
      "---------------\n",
      "Epoch 12/30 - Train Loss: 82.0511\n",
      "Per Task Accuracy: {0: 0.9821750450630883, 1: 0.3277976494634645, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 593.3269 - Validation Accuracy: 0.5150\n",
      "---------------\n",
      "Epoch 13/30\n",
      "---------------\n",
      "Epoch 13/30 - Train Loss: 85.0351\n",
      "Per Task Accuracy: {0: 0.9797716803524935, 1: 0.34491568727644356, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 606.8765 - Validation Accuracy: 0.5047\n",
      "---------------\n",
      "Epoch 14/30\n",
      "---------------\n",
      "Epoch 14/30 - Train Loss: 83.0535\n",
      "Per Task Accuracy: {0: 0.9805728019226918, 1: 0.3497700562084824, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 587.7710 - Validation Accuracy: 0.5069\n",
      "---------------\n",
      "Epoch 15/30\n",
      "---------------\n",
      "Epoch 15/30 - Train Loss: 78.2307\n",
      "Per Task Accuracy: {0: 0.9823753254556379, 1: 0.31221257026060295, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 588.8887 - Validation Accuracy: 0.5150\n",
      "---------------\n",
      "Epoch 16/30\n",
      "---------------\n",
      "Epoch 16/30 - Train Loss: 78.7212\n",
      "Per Task Accuracy: {0: 0.9865812136991788, 1: 0.2899846704138988, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 598.9043 - Validation Accuracy: 0.5214\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 17/30\n",
      "---------------\n",
      "Epoch 17/30 - Train Loss: 77.3785\n",
      "Per Task Accuracy: {0: 0.9389144802723813, 1: 0.534108329075115, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 658.1069 - Validation Accuracy: 0.4823\n",
      "---------------\n",
      "Epoch 18/30\n",
      "---------------\n",
      "Epoch 18/30 - Train Loss: 86.4658\n",
      "Per Task Accuracy: {0: 0.9805728019226918, 1: 0.2821921308124681, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 613.1201 - Validation Accuracy: 0.5007\n",
      "---------------\n",
      "Epoch 19/30\n",
      "---------------\n",
      "Epoch 19/30 - Train Loss: 88.2917\n",
      "Per Task Accuracy: {0: 0.9841778489885841, 1: 0.272100153295861, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 602.8478 - Validation Accuracy: 0.4859\n",
      "---------------\n",
      "Epoch 20/30\n",
      "---------------\n",
      "Epoch 20/30 - Train Loss: 88.5800\n",
      "Per Task Accuracy: {0: 0.9839775685960345, 1: 0.31119059785385794, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 583.1535 - Validation Accuracy: 0.5197\n",
      "---------------\n",
      "Epoch 21/30\n",
      "---------------\n",
      "Epoch 21/30 - Train Loss: 83.8965\n",
      "Per Task Accuracy: {0: 0.9765671940717003, 1: 0.3616504854368932, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 580.0606 - Validation Accuracy: 0.5090\n",
      "---------------\n",
      "Epoch 22/30\n",
      "---------------\n",
      "Epoch 22/30 - Train Loss: 80.2291\n",
      "Per Task Accuracy: {0: 0.9863809333066292, 1: 0.3083801737353091, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 606.7414 - Validation Accuracy: 0.5004\n",
      "---------------\n",
      "Epoch 23/30\n",
      "---------------\n",
      "Epoch 23/30 - Train Loss: 79.5597\n",
      "Per Task Accuracy: {0: 0.9831764470258362, 1: 0.37123147675012774, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 611.2806 - Validation Accuracy: 0.5120\n",
      "---------------\n",
      "Epoch 24/30\n",
      "---------------\n",
      "Epoch 24/30 - Train Loss: 78.4402\n",
      "Per Task Accuracy: {0: 0.9777688764269978, 1: 0.38962698007153806, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 603.7447 - Validation Accuracy: 0.5187\n",
      "---------------\n",
      "Epoch 25/30\n",
      "---------------\n",
      "Epoch 25/30 - Train Loss: 77.3663\n",
      "Per Task Accuracy: {0: 0.9811736431003405, 1: 0.4077669902912621, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 600.1712 - Validation Accuracy: 0.5227\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 26/30\n",
      "---------------\n",
      "Epoch 26/30 - Train Loss: 75.9321\n",
      "Per Task Accuracy: {0: 0.9827758862407371, 1: 0.37033725089422587, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 613.5570 - Validation Accuracy: 0.5227\n",
      "---------------\n",
      "Epoch 27/30\n",
      "---------------\n",
      "Epoch 27/30 - Train Loss: 76.0354\n",
      "Per Task Accuracy: {0: 0.9783697176046465, 1: 0.4118548799182422, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 623.4250 - Validation Accuracy: 0.5186\n",
      "---------------\n",
      "Epoch 28/30\n",
      "---------------\n",
      "Epoch 28/30 - Train Loss: 77.0765\n",
      "Per Task Accuracy: {0: 0.9811736431003405, 1: 0.3663771078180889, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 595.4742 - Validation Accuracy: 0.5231\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 29/30\n",
      "---------------\n",
      "Epoch 29/30 - Train Loss: 75.8699\n",
      "Per Task Accuracy: {0: 0.9805728019226918, 1: 0.37455288707204903, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 630.3261 - Validation Accuracy: 0.5109\n",
      "---------------\n",
      "Epoch 30/30\n",
      "---------------\n",
      "Epoch 30/30 - Train Loss: 82.3293\n",
      "Per Task Accuracy: {0: 0.9392900060084117, 1: 0.5112416964741952, 2: 0.0, 3: 0.0, 4: 0.0}\n",
      "Validation Loss: 639.4375 - Validation Accuracy: 0.4747\n",
      "Loss on test set: 643.0639\n",
      "Accuracy on test set: 0.4747\n",
      "Incremental ARC Classifier added, current len: 6\n",
      "Incremental Task Layer added, current len: 6\n",
      "tsk: 5\n",
      "alpha_cls: [194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      "   7.79993916   7.79993916   7.79993916   7.79993916   7.79993916]\n",
      "alpha_tsk: [ 3.89998479 38.99847906 38.99847906 38.99847906 38.99847906  1.55999757]\n",
      "num_prev_task_classes: 70\n",
      "alpha_cls: [194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      "   7.79993916   7.79993916   7.79993916   7.79993916   7.79993916]\n",
      "alpha_tsk: [ 26.44792715 264.4699886  264.4699886  264.4699886  264.4699886\n",
      "  10.57919561]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d6729c2669e547458c09411cc2e60fd0",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1830 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 179.5898\n",
      "Per Task Accuracy: {0: 0.9903865411576207, 1: 0.1041134389371487, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0021041557075223566}\n",
      "Validation Loss: 613.9358 - Validation Accuracy: 0.4555\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 141.0097\n",
      "Per Task Accuracy: {0: 0.9679551371920689, 1: 0.292156361778232, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 599.2425 - Validation Accuracy: 0.4739\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 137.7799\n",
      "Per Task Accuracy: {0: 0.9857800921289805, 1: 0.1992846193152785, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 615.6789 - Validation Accuracy: 0.4843\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 127.7097\n",
      "Per Task Accuracy: {0: 0.9847786901662328, 1: 0.21601941747572814, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 605.8342 - Validation Accuracy: 0.4977\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 123.3753\n",
      "Per Task Accuracy: {0: 0.9783697176046465, 1: 0.2907511497189576, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 596.1570 - Validation Accuracy: 0.5135\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 119.3895\n",
      "Per Task Accuracy: {0: 0.9791708391748448, 1: 0.30033214103219213, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 610.8122 - Validation Accuracy: 0.5044\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 118.4181\n",
      "Per Task Accuracy: {0: 0.9825756058481875, 1: 0.2945835462442514, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 630.9245 - Validation Accuracy: 0.4949\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 123.0254\n",
      "Per Task Accuracy: {0: 0.9545113158421791, 1: 0.3321410321921308, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 687.6892 - Validation Accuracy: 0.4541\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n",
      "Epoch 9/30 - Train Loss: 120.6175\n",
      "Per Task Accuracy: {0: 0.9801722411375926, 1: 0.3018650996423097, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 645.5786 - Validation Accuracy: 0.4876\n",
      "---------------\n",
      "Epoch 10/30\n",
      "---------------\n",
      "Epoch 10/30 - Train Loss: 117.7338\n",
      "Per Task Accuracy: {0: 0.9751652313238534, 1: 0.3240929994890138, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 645.2353 - Validation Accuracy: 0.4903\n",
      "---------------\n",
      "Epoch 11/30\n",
      "---------------\n",
      "Epoch 11/30 - Train Loss: 118.3701\n",
      "Per Task Accuracy: {0: 0.9843781293811336, 1: 0.256004087889627, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 662.5160 - Validation Accuracy: 0.4896\n",
      "---------------\n",
      "Epoch 12/30\n",
      "---------------\n",
      "Epoch 12/30 - Train Loss: 117.3363\n",
      "Per Task Accuracy: {0: 0.9827758862407371, 1: 0.26609606540623404, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 647.0692 - Validation Accuracy: 0.4923\n",
      "---------------\n",
      "Epoch 13/30\n",
      "---------------\n",
      "Epoch 13/30 - Train Loss: 116.0223\n",
      "Per Task Accuracy: {0: 0.9825756058481875, 1: 0.30045988758303527, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 645.5764 - Validation Accuracy: 0.4955\n",
      "---------------\n",
      "Epoch 14/30\n",
      "---------------\n",
      "Epoch 14/30 - Train Loss: 116.7136\n",
      "Per Task Accuracy: {0: 0.9775685960344482, 1: 0.3463208993357179, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 643.8556 - Validation Accuracy: 0.4988\n",
      "---------------\n",
      "Epoch 15/30\n",
      "---------------\n",
      "Epoch 15/30 - Train Loss: 115.0885\n",
      "Per Task Accuracy: {0: 0.9917885039054677, 1: 0.20337250894225856, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 680.2128 - Validation Accuracy: 0.4843\n",
      "---------------\n",
      "Epoch 16/30\n",
      "---------------\n",
      "Epoch 16/30 - Train Loss: 116.4589\n",
      "Per Task Accuracy: {0: 0.9865812136991788, 1: 0.28398058252427183, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 645.8190 - Validation Accuracy: 0.5017\n",
      "---------------\n",
      "Epoch 17/30\n",
      "---------------\n",
      "Epoch 17/30 - Train Loss: 114.1449\n",
      "Per Task Accuracy: {0: 0.9805728019226918, 1: 0.3225600408788963, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 639.1818 - Validation Accuracy: 0.5111\n",
      "---------------\n",
      "Epoch 18/30\n",
      "---------------\n",
      "Epoch 18/30 - Train Loss: 112.4326\n",
      "Per Task Accuracy: {0: 0.9819747646705388, 1: 0.3307358201328564, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 645.4928 - Validation Accuracy: 0.5075\n",
      "---------------\n",
      "Epoch 19/30\n",
      "---------------\n",
      "Epoch 19/30 - Train Loss: 112.3748\n",
      "Per Task Accuracy: {0: 0.9899859803725215, 1: 0.26814001021972406, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 645.7376 - Validation Accuracy: 0.5051\n",
      "---------------\n",
      "Epoch 20/30\n",
      "---------------\n",
      "Epoch 20/30 - Train Loss: 114.1481\n",
      "Per Task Accuracy: {0: 0.9857800921289805, 1: 0.29854368932038833, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 661.8733 - Validation Accuracy: 0.4949\n",
      "---------------\n",
      "Epoch 21/30\n",
      "---------------\n",
      "Epoch 21/30 - Train Loss: 113.0961\n",
      "Per Task Accuracy: {0: 0.9815742038854396, 1: 0.3133622892181911, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 670.6455 - Validation Accuracy: 0.4983\n",
      "---------------\n",
      "Epoch 22/30\n",
      "---------------\n",
      "Epoch 22/30 - Train Loss: 114.8575\n",
      "Per Task Accuracy: {0: 0.9865812136991788, 1: 0.2687787429739397, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 666.7042 - Validation Accuracy: 0.4857\n",
      "---------------\n",
      "Epoch 23/30\n",
      "---------------\n",
      "Epoch 23/30 - Train Loss: 113.6037\n",
      "Per Task Accuracy: {0: 0.9823753254556379, 1: 0.2883239652529382, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 701.5994 - Validation Accuracy: 0.4783\n",
      "---------------\n",
      "Epoch 24/30\n",
      "---------------\n",
      "Epoch 24/30 - Train Loss: 118.3086\n",
      "Per Task Accuracy: {0: 0.9863809333066292, 1: 0.2706949412365866, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 669.7543 - Validation Accuracy: 0.4829\n",
      "---------------\n",
      "Epoch 25/30\n",
      "---------------\n",
      "Epoch 25/30 - Train Loss: 115.2264\n",
      "Per Task Accuracy: {0: 0.9825756058481875, 1: 0.30927439959121106, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 667.8083 - Validation Accuracy: 0.4949\n",
      "---------------\n",
      "Epoch 26/30\n",
      "---------------\n",
      "Epoch 26/30 - Train Loss: 116.4780\n",
      "Per Task Accuracy: {0: 0.9819747646705388, 1: 0.35258048032703115, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 692.9943 - Validation Accuracy: 0.4791\n",
      "---------------\n",
      "Epoch 27/30\n",
      "---------------\n",
      "Epoch 27/30 - Train Loss: 117.4376\n",
      "Per Task Accuracy: {0: 0.9789705587822952, 1: 0.3808124680633623, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 694.8242 - Validation Accuracy: 0.4699\n",
      "---------------\n",
      "Epoch 28/30\n",
      "---------------\n",
      "Epoch 28/30 - Train Loss: 117.6000\n",
      "Per Task Accuracy: {0: 0.9825756058481875, 1: 0.3235820132856413, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 700.0679 - Validation Accuracy: 0.4624\n",
      "---------------\n",
      "Epoch 29/30\n",
      "---------------\n",
      "Epoch 29/30 - Train Loss: 120.6283\n",
      "Per Task Accuracy: {0: 0.9805728019226918, 1: 0.34989780275932547, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 652.9253 - Validation Accuracy: 0.4899\n",
      "---------------\n",
      "Epoch 30/30\n",
      "---------------\n",
      "Epoch 30/30 - Train Loss: 115.8383\n",
      "Per Task Accuracy: {0: 0.9731624273983577, 1: 0.40419008686765456, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}\n",
      "Validation Loss: 672.1359 - Validation Accuracy: 0.4927\n",
      "Loss on test set: 676.7654\n",
      "Accuracy on test set: 0.4927\n",
      "Incremental ARC Classifier added, current len: 7\n",
      "Incremental Task Layer added, current len: 7\n",
      "tsk: 6\n",
      "alpha_cls: [199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008   7.999936   7.999936   7.999936\n",
      "   7.999936   7.999936]\n",
      "alpha_tsk: [ 3.999984   39.99840006 39.99840006 39.99840006 39.99840006 39.99840006\n",
      "  1.59999744]\n",
      "num_prev_task_classes: 75\n",
      "alpha_cls: [199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008   7.999936   7.999936   7.999936\n",
      "   7.999936   7.999936]\n",
      "alpha_tsk: [ 25.59890226 255.97980735 255.97980735 255.97980735 255.97980735\n",
      " 255.97980735  10.23958548]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c9a4115d526f4189af2a7588159474f2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1890 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 213.2617\n",
      "Per Task Accuracy: {0: 0.9783697176046465, 1: 0.321282575370465, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 680.0243 - Validation Accuracy: 0.4110\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 177.5558\n",
      "Per Task Accuracy: {0: 0.9767674744642499, 1: 0.3516862544711293, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 671.5582 - Validation Accuracy: 0.4554\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 170.7681\n",
      "Per Task Accuracy: {0: 0.9831764470258362, 1: 0.33188553909044455, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 657.2502 - Validation Accuracy: 0.4684\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 165.0214\n",
      "Per Task Accuracy: {0: 0.9833767274183858, 1: 0.3452989269289729, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 661.7854 - Validation Accuracy: 0.4718\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 162.4280\n",
      "Per Task Accuracy: {0: 0.9853795313438815, 1: 0.32383750638732756, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 666.4651 - Validation Accuracy: 0.4731\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 162.3401\n",
      "Per Task Accuracy: {0: 0.9771680352493491, 1: 0.3771078180889116, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 690.2601 - Validation Accuracy: 0.4704\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 161.4337\n",
      "Per Task Accuracy: {0: 0.9835770078109353, 1: 0.35117526826775675, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 683.1338 - Validation Accuracy: 0.4794\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 157.9951\n",
      "Per Task Accuracy: {0: 0.9789705587822952, 1: 0.38515585079202863, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 714.7562 - Validation Accuracy: 0.4696\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n",
      "Epoch 9/30 - Train Loss: 156.6840\n",
      "Per Task Accuracy: {0: 0.9803725215301422, 1: 0.37698007153806845, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 701.3220 - Validation Accuracy: 0.4734\n",
      "---------------\n",
      "Epoch 10/30\n",
      "---------------\n",
      "Epoch 10/30 - Train Loss: 156.2906\n",
      "Per Task Accuracy: {0: 0.989785699979972, 1: 0.3114460909555442, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 706.4729 - Validation Accuracy: 0.4699\n",
      "---------------\n",
      "Epoch 11/30\n",
      "---------------\n",
      "Epoch 11/30 - Train Loss: 156.5486\n",
      "Per Task Accuracy: {0: 0.9859803725215301, 1: 0.35769034236075625, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 727.1849 - Validation Accuracy: 0.4629\n",
      "---------------\n",
      "Epoch 12/30\n",
      "---------------\n",
      "Epoch 12/30 - Train Loss: 158.3807\n",
      "Per Task Accuracy: {0: 0.986981774484278, 1: 0.31962187020950433, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 752.7853 - Validation Accuracy: 0.4533\n",
      "---------------\n",
      "Epoch 13/30\n",
      "---------------\n",
      "Epoch 13/30 - Train Loss: 157.0076\n",
      "Per Task Accuracy: {0: 0.9891848588023232, 1: 0.29381706693919263, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 733.9513 - Validation Accuracy: 0.4713\n",
      "---------------\n",
      "Epoch 14/30\n",
      "---------------\n",
      "Epoch 14/30 - Train Loss: 158.5672\n",
      "Per Task Accuracy: {0: 0.9835770078109353, 1: 0.3161727133367399, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 741.6705 - Validation Accuracy: 0.4648\n",
      "---------------\n",
      "Epoch 15/30\n",
      "---------------\n",
      "Epoch 15/30 - Train Loss: 158.6878\n",
      "Per Task Accuracy: {0: 0.991187662727819, 1: 0.23339294839039346, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 735.9088 - Validation Accuracy: 0.4674\n",
      "---------------\n",
      "Epoch 16/30\n",
      "---------------\n",
      "Epoch 16/30 - Train Loss: 158.5101\n",
      "Per Task Accuracy: {0: 0.9891848588023232, 1: 0.29151762902401634, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 719.2080 - Validation Accuracy: 0.4708\n",
      "---------------\n",
      "Epoch 17/30\n",
      "---------------\n",
      "Epoch 17/30 - Train Loss: 159.4418\n",
      "Per Task Accuracy: {0: 0.9859803725215301, 1: 0.3018650996423097, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 750.5329 - Validation Accuracy: 0.4694\n",
      "---------------\n",
      "Epoch 18/30\n",
      "---------------\n",
      "Epoch 18/30 - Train Loss: 159.6005\n",
      "Per Task Accuracy: {0: 0.9847786901662328, 1: 0.29151762902401634, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 746.8190 - Validation Accuracy: 0.4556\n",
      "---------------\n",
      "Epoch 19/30\n",
      "---------------\n",
      "Epoch 19/30 - Train Loss: 171.3694\n",
      "Per Task Accuracy: {0: 0.9621219707590627, 1: 0.24833929483903935, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 856.3272 - Validation Accuracy: 0.3779\n",
      "---------------\n",
      "Epoch 20/30\n",
      "---------------\n",
      "Epoch 20/30 - Train Loss: 252.4328\n",
      "Per Task Accuracy: {0: 0.9773432805928299, 1: 0.1580224833929484, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 713.3313 - Validation Accuracy: 0.4016\n",
      "---------------\n",
      "Epoch 21/30\n",
      "---------------\n",
      "Epoch 21/30 - Train Loss: 206.3074\n",
      "Per Task Accuracy: {0: 0.9819747646705388, 1: 0.18638221768012264, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 688.1460 - Validation Accuracy: 0.4341\n",
      "---------------\n",
      "Epoch 22/30\n",
      "---------------\n",
      "Epoch 22/30 - Train Loss: 185.3723\n",
      "Per Task Accuracy: {0: 0.9811736431003405, 1: 0.22215125191619825, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 650.0926 - Validation Accuracy: 0.4581\n",
      "---------------\n",
      "Epoch 23/30\n",
      "---------------\n",
      "Epoch 23/30 - Train Loss: 165.4093\n",
      "Per Task Accuracy: {0: 0.9807730823152413, 1: 0.27427184466019416, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 646.7667 - Validation Accuracy: 0.4779\n",
      "---------------\n",
      "Epoch 24/30\n",
      "---------------\n",
      "Epoch 24/30 - Train Loss: 161.8804\n",
      "Per Task Accuracy: {0: 0.9781694372120969, 1: 0.28040367910066427, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 660.8447 - Validation Accuracy: 0.4825\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 25/30\n",
      "---------------\n",
      "Epoch 25/30 - Train Loss: 159.4079\n",
      "Per Task Accuracy: {0: 0.9841778489885841, 1: 0.2561318344404701, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 678.1699 - Validation Accuracy: 0.4773\n",
      "---------------\n",
      "Epoch 26/30\n",
      "---------------\n",
      "Epoch 26/30 - Train Loss: 158.0766\n",
      "Per Task Accuracy: {0: 0.9775685960344482, 1: 0.3079969340827798, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 679.3336 - Validation Accuracy: 0.4753\n",
      "---------------\n",
      "Epoch 27/30\n",
      "---------------\n",
      "Epoch 27/30 - Train Loss: 157.0734\n",
      "Per Task Accuracy: {0: 0.9865812136991788, 1: 0.24974450689831373, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}\n",
      "Validation Loss: 688.5306 - Validation Accuracy: 0.4775\n",
      "---------------\n",
      "Epoch 28/30\n",
      "---------------\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[16], line 92\u001b[0m\n\u001b[1;32m     90\u001b[0m \u001b[38;5;66;03m#criterion_label=None\u001b[39;00m\n\u001b[1;32m     91\u001b[0m model\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m---> 92\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[43mtrain_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m     93\u001b[0m \u001b[43m            \u001b[49m\u001b[43mtask_class_orders\u001b[49m\u001b[43m,\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mcombined_train_loader\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mval\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mcombined_test_loader\u001b[49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m     94\u001b[0m \u001b[43m            \u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43mweight_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43mscheduler\u001b[49m\u001b[43m,\u001b[49m\u001b[43mlabel_mapping\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlabel_mapping\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m     95\u001b[0m \u001b[43m            \u001b[49m\u001b[43mnum_epochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_epochs\u001b[49m\u001b[43m,\u001b[49m\u001b[43muse_weighted_loss\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m     98\u001b[0m \u001b[38;5;66;03m# 在完整数据集上评估模型\u001b[39;00m\n\u001b[1;32m     99\u001b[0m loss, accuracy 
\u001b[38;5;241m=\u001b[39m evaluate_model(device,label_mapping,model, {\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mval\u001b[39m\u001b[38;5;124m'\u001b[39m: combined_test_loader}, criterion)\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/train_eval.py:184\u001b[0m, in \u001b[0;36mtrain_model\u001b[0;34m(device, model, task_class_orders, dataloaders, criterion, criterion_label, weight_label, optimizer, scheduler, label_mapping, num_epochs, stop_accuracy, use_weighted_loss, weighted_loss_momentum)\u001b[0m\n\u001b[1;32m    181\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEpoch \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mepoch\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_epochs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m    182\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m-\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m15\u001b[39m)\n\u001b[0;32m--> 184\u001b[0m train_loss, _, _, _ \u001b[38;5;241m=\u001b[39m \u001b[43mtrain_one_epoch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43mdataloaders\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43mweight_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpbar\u001b[49m\u001b[43m,\u001b[49m\u001b[43mlabel_mapping_tensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43mtask_label_mapping_tensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclass_weights\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    185\u001b[0m 
\u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEpoch \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mepoch\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_epochs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m - Train Loss: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtrain_loss\u001b[38;5;132;01m:\u001b[39;00m\u001b[38;5;124m.4f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m    186\u001b[0m val_loss, val_accuracy, _,_ \u001b[38;5;241m=\u001b[39m validate_one_epoch(model, dataloaders[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mval\u001b[39m\u001b[38;5;124m'\u001b[39m], criterion,label_mapping_tensor, task_label_mapping_tensor,device)\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/train_eval.py:29\u001b[0m, in \u001b[0;36mtrain_one_epoch\u001b[0;34m(model, dataloader, criterion, criterion_label, weight_label, optimizer, device, pbar, label_mapping_tensor, task_label_mapping_tensor, class_weights)\u001b[0m\n\u001b[1;32m     26\u001b[0m inputs, rwa_labels \u001b[38;5;241m=\u001b[39m inputs\u001b[38;5;241m.\u001b[39mto(device), rwa_labels\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[1;32m     27\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mzero_grad()\n\u001b[0;32m---> 29\u001b[0m _, outputs,tsk_outputs \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     30\u001b[0m \u001b[38;5;66;03m#print(f\"tsk_outputs shape: {tsk_outputs.shape}\")\u001b[39;00m\n\u001b[1;32m     31\u001b[0m labels \u001b[38;5;241m=\u001b[39m label_mapping_tensor[rwa_labels]\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/network.py:189\u001b[0m, in \u001b[0;36mArcMultiTaskModel.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m    187\u001b[0m gate_weights \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m    188\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m layer \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mincremental_layers:\n\u001b[0;32m--> 189\u001b[0m     x,gate_weight \u001b[38;5;241m=\u001b[39m \u001b[43mlayer\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    190\u001b[0m     gate_weights\u001b[38;5;241m.\u001b[39mappend(gate_weight\u001b[38;5;241m.\u001b[39msqueeze())\n\u001b[1;32m    191\u001b[0m \u001b[38;5;66;03m# 初始化task_predictions列表\u001b[39;00m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/network.py:120\u001b[0m, in \u001b[0;36mIncrementalTaskLayer.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m    117\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, x):\n\u001b[1;32m    118\u001b[0m     \u001b[38;5;66;03m#print('x shape:', x.shape)\u001b[39;00m\n\u001b[1;32m    119\u001b[0m     \u001b[38;5;66;03m# 通过门控层\u001b[39;00m\n\u001b[0;32m--> 120\u001b[0m     gated_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlayer\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    122\u001b[0m     \u001b[38;5;66;03m# 通过门控权重生成网络，生成门控权重\u001b[39;00m\n\u001b[1;32m    123\u001b[0m     \u001b[38;5;66;03m#print('torch.cat([x, gated_output]:', torch.cat([x, gated_output], dim=1).shape)\u001b[39;00m\n\u001b[1;32m    124\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m'''#NT2\u001b[39;00m\n\u001b[1;32m    125\u001b[0m \u001b[38;5;124;03m    gate_weight = self.gate_generator(torch.cat([x, gated_output], dim=1)).unsqueeze(-1).unsqueeze(-1)'''\u001b[39;00m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/container.py:250\u001b[0m, in \u001b[0;36mSequential.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m    248\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m):\n\u001b[1;32m    249\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m module \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m:\n\u001b[0;32m--> 250\u001b[0m         \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m    251\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28minput\u001b[39m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/conv.py:554\u001b[0m, in \u001b[0;36mConv2d.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m    553\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 554\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_conv_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/conv.py:549\u001b[0m, in \u001b[0;36mConv2d._conv_forward\u001b[0;34m(self, input, weight, bias)\u001b[0m\n\u001b[1;32m    537\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mzeros\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m    538\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m F\u001b[38;5;241m.\u001b[39mconv2d(\n\u001b[1;32m    539\u001b[0m         F\u001b[38;5;241m.\u001b[39mpad(\n\u001b[1;32m    540\u001b[0m             \u001b[38;5;28minput\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reversed_padding_repeated_twice, mode\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    547\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgroups,\n\u001b[1;32m    548\u001b[0m     )\n\u001b[0;32m--> 549\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv2d\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    550\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbias\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpadding\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdilation\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgroups\u001b[49m\n\u001b[1;32m    551\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# --- Incremental (cascade) training loop over tasks 1..9 ---\n",
    "# Each iteration grows the model with a new ArcFace head + incremental layer,\n",
    "# trains on exemplars of old tasks + full data of the new task, then evaluates\n",
    "# on the union of all seen tasks' test sets and checkpoints the model.\n",
    "for tsk in range(1, 10):\n",
    "\n",
    "    # Grow the model for this task: a new ArcFace classifier head and a new\n",
    "    # incremental feature layer.\n",
    "    model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[tsk], s=s, m=m)\n",
    "    model.add_incremental_task_layer()\n",
    "    print('tsk:', tsk)\n",
    "\n",
    "    # Freeze the shared backbone plus everything learned for previous tasks.\n",
    "    # Use a fresh list each iteration so nothing leaks across tasks.\n",
    "    freeze_prefix = [\n",
    "        'feature_extractor',\n",
    "        'avgpool',\n",
    "        'fc',\n",
    "    ]\n",
    "    if tsk == 1:\n",
    "        freeze_prefix.append('incremental_layers.' + str(tsk - 1))\n",
    "    else:\n",
    "        for layer in range(0, tsk - 1):\n",
    "            freeze_prefix.append('incremental_layers.' + str(layer))\n",
    "        # For the most recent previous task only its '.layer' sub-module is frozen.\n",
    "        freeze_prefix.append('incremental_layers.' + str(tsk - 1) + '.layer')\n",
    "    for layer in range(0, tsk):\n",
    "        freeze_prefix.append('arc_classifiers.' + str(layer))\n",
    "    # NOTE(review): freeze_prefix is constructed but never passed to freeze_layers()\n",
    "    # in this cell -- confirm the freeze is applied elsewhere, otherwise no layer\n",
    "    # is actually frozen.\n",
    "\n",
    "    # Training set: exemplar subsets of all previous tasks + the full training\n",
    "    # data of the current task.\n",
    "    combined_train_set = [exampler_train_sets[i] for i in range(tsk)]\n",
    "    combined_train_set.append(origin_train_set[tsk])\n",
    "    combined_train_set = ConcatDataset(combined_train_set)\n",
    "    combined_train_loader = DataLoader(MyDataset(combined_train_set, transform=transform_train),\n",
    "                                    batch_size=batch_size, shuffle=True, num_workers=4)\n",
    "\n",
    "    # Test set: the full test data of every task seen so far (including current).\n",
    "    combined_test_set = ConcatDataset([origin_test_set[i] for i in range(tsk + 1)])\n",
    "    combined_test_loader = DataLoader(MyDataset(combined_test_set, transform=transform_test),\n",
    "                                    batch_size=batch_size, shuffle=False, num_workers=4)\n",
    "\n",
    "    # Optimization hyper-parameters; the task-label loss weight grows linearly\n",
    "    # with the task index (0.5, 0.75, 1.0, ...).\n",
    "    num_epochs = 30\n",
    "    weight_decay = 0.001\n",
    "    max_lr = 0.01\n",
    "    weight_label = 0.5 + ((tsk - 1) * 0.25)\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=max_lr, weight_decay=weight_decay)\n",
    "    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs,\n",
    "                                                    steps_per_epoch=len(combined_train_loader), three_phase=False)\n",
    "\n",
    "    # Class-balanced focal-loss alphas computed from the (exemplar-skewed)\n",
    "    # training distribution.  (The original cell called this twice; once is enough.)\n",
    "    class_stats = get_class_stats(combined_train_loader)\n",
    "    alpha_cls, alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "    print(\"alpha_cls:\", alpha_cls)\n",
    "    print(\"alpha_tsk:\", alpha_tsk)\n",
    "    alpha_cls, alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk,\n",
    "                    alpha_cls=alpha_cls, alpha_tsk=alpha_tsk,\n",
    "                    w_cls=1, w_tsk=1)\n",
    "\n",
    "    # Rescale the task alphas so their mean matches the class alphas' mean.\n",
    "    alpha_tsk = alpha_cls.mean() / alpha_tsk.mean() * alpha_tsk\n",
    "    print(\"alpha_cls:\", alpha_cls)\n",
    "    print(\"alpha_tsk:\", alpha_tsk)\n",
    "    criterion = MultiClassFocalLossWithAlpha(device, alpha_cls, gamma=0, reduction='mean')\n",
    "    criterion_label = MultiClassFocalLossWithAlpha(device, alpha_tsk, gamma=0, reduction='mean')\n",
    "\n",
    "    model.to(device)\n",
    "    model = train_model(device, model,\n",
    "                task_class_orders, {'train': combined_train_loader, 'val': combined_test_loader},\n",
    "                criterion, criterion_label, weight_label, optimizer, scheduler, label_mapping=label_mapping,\n",
    "                num_epochs=num_epochs, use_weighted_loss=False)\n",
    "\n",
    "    # Evaluate the model on the union of all seen tasks' test sets.\n",
    "    loss, accuracy = evaluate_model(device, label_mapping, model, {'val': combined_test_loader}, criterion)\n",
    "    print(f'Loss on test set: {loss:.4f}')\n",
    "    print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "    # Save a checkpoint named after the task index, accuracy (3 decimals) and a\n",
    "    # timestamp, under a per-task subdirectory of model_save_pth.\n",
    "    accuracy_str = f\"{accuracy:.3f}\"\n",
    "    current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "    file_name = f\"Loop_{str(tsk)}_{accuracy_str}_{current_time}.pth\"\n",
    "    task_save_pth = os.path.join(model_save_pth, str(tsk))\n",
    "    # exist_ok=True makes the separate exists() check unnecessary.\n",
    "    os.makedirs(task_save_pth, exist_ok=True)\n",
    "    torch.save(model.state_dict(), os.path.join(task_save_pth, file_name))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "dl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
