{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import ConcatDataset,DataLoader\n",
    "from torch import nn, optim\n",
    "import torch\n",
    "import datetime\n",
    "from tqdm.notebook import tqdm\n",
    "import os\n",
    "#from cascade_lib import *\n",
    "from cascade_libary.networkv1 import create_ArcMultiTaskModel\n",
    "from cascade_libary.dataset import create_exampler_datasets_and_loaders, create_task_datasets_and_loaders,MyDataset,transform_test,transform_train\n",
    "from cascade_libary.train_eval import train_model, evaluate_model\n",
    "from cascade_libary.criterian import get_class_stats,sort_and_complete_alpha_by_label_mapping,MultiClassFocalLossWithAlpha\n",
    "from cascade_libary.utils import freeze_layers\n",
    "from torchvision import datasets as torchvision_datasets\n",
    "import numpy as np\n",
    "import itertools\n",
    "from kornia.losses import FocalLoss\n",
    "import math\n",
    "import torch.nn.functional as F\n",
    "import random\n",
    "\n",
    "#from kornia.losses import FocalLoss\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Experiment log (scratch notes on layer freezing / hyperparameter trials):**\n",
    "\n",
    "Trial A — freeze only the feature extractor:\n",
    "\n",
    "```python\n",
    "init_freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    #'avgpool',\n",
    "    #'fc',\n",
    "    ]\n",
    "    #freeze_prefix.append('arc_classifiers.' + str(tsk))\n",
    "```\n",
    "\n",
    "Recorded results: 0.16, 46.97, 0.25, 0.472; with gamma 1,0 → 0.422.\n",
    "\n",
    "Trial B — freeze the feature extractor and avgpool, with a per-task label weight:\n",
    "\n",
    "```python\n",
    "init_freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'avgpool',\n",
    "    #'fc',\n",
    "    ]\n",
    "weight_label=0.5+((tsk-1)*0.25)\n",
    "```\n",
    "\n",
    "Arc classifiers left unfrozen; gamma 0,0 → 0.485."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "def reweight_alphas(task_class_orders, tsk,alpha_cls, alpha_tsk,w_cls, w_tsk):\n",
    "    \"\"\"\n",
    "    Rescale the focal-loss alpha weights of everything seen before task `tsk`.\n",
    "\n",
    "    Parameters:\n",
    "    task_class_orders -- list of class-order lists, one per task.\n",
    "    tsk -- current task index; tasks 0..tsk-1 are treated as previous tasks.\n",
    "    alpha_cls -- per-class alpha weights; mutated in place and returned.\n",
    "    alpha_tsk -- per-task alpha weights; mutated in place and returned.\n",
    "    w_cls -- factor applied to the alphas of classes from previous tasks.\n",
    "    w_tsk -- factor applied to the alphas of previous tasks.\n",
    "\n",
    "    Returns:\n",
    "    The updated alpha_cls and alpha_tsk (same objects that were passed in).\n",
    "    \"\"\"\n",
    "    # Total number of classes belonging to all tasks before task `tsk`.\n",
    "\n",
    "    num_prev_task_classes = sum(len(task) for task in task_class_orders[:tsk])\n",
    "    print(\"num_prev_task_classes:\", num_prev_task_classes)\n",
    "    # Scale the class-level alphas of the previously-seen classes.\n",
    "    for i in range(num_prev_task_classes):\n",
    "        alpha_cls[i] *= w_cls\n",
    "    \n",
    "    # Scale the task-level alphas of the previous tasks (indices 0..tsk-1).\n",
    "    for i in range(tsk):\n",
    "        alpha_tsk[i] *= w_tsk\n",
    "    \n",
    "    return alpha_cls, alpha_tsk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reproducibility: seed the random number generators.\n",
    "def set_seed(seed_value=42):\n",
    "    \"\"\"Seed Python's `random` module and NumPy's global RNG.\n",
    "\n",
    "    NOTE(review): the torch/CUDA seeding below is disabled (it sits inside a\n",
    "    dead string literal), so GPU-side randomness is NOT seeded -- confirm intent.\n",
    "    \"\"\"\n",
    "    random.seed(seed_value)  # Python's built-in random module\n",
    "    np.random.seed(seed_value)  # NumPy's global random generator\n",
    "    '''torch.manual_seed(seed_value)  # PyTorch的随机数生成器\n",
    "    # 如果使用CUDA\n",
    "    torch.cuda.manual_seed_all(seed_value)  # 为所有CUDA设备设置随机种子\n",
    "    torch.backends.cudnn.deterministic = True  # 设置确定性卷积\n",
    "    torch.backends.cudnn.benchmark = False  # 为了确保确定性，关闭 cudnn 基准测试'''\n",
    "# Apply the seed once at startup.\n",
    "set_seed(42)\n",
    "\n",
    "# Select the training device; falls back to CPU when CUDA is unavailable.\n",
    "gpu=1\n",
    "if torch.cuda.is_available():\n",
    "        torch.cuda.set_device(gpu)\n",
    "        device = 'cuda'\n",
    "else:\n",
    "        print('WARNING: [CUDA unavailable] Using CPU instead!')\n",
    "        device = 'cpu'\n",
    "#device='cpu'\n",
    "# Checkpoint directory for this run (hard-coded absolute path).\n",
    "model_save_pth='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade/loop'\n",
    "\n",
    "# s and m are forwarded to the arc classifiers (scale / margin arguments\n",
    "# of add_arc_task_classifier below).\n",
    "s=12.5\n",
    "m=0.0"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 初始化"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Load the raw CIFAR-100 splits once, outside any helper function.\n",
    "root_dir = '../data'\n",
    "train_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=True, download=True)\n",
    "test_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=False, download=True)\n",
    "\n",
    "# Randomize the class order; label_mapping sends original label -> new label.\n",
    "class_order = list(range(100))\n",
    "random.shuffle(class_order)\n",
    "label_mapping = {original_label: new_label for new_label, original_label in enumerate(class_order)}\n",
    "# Number of classes assigned to each task (50 base classes + 10 tasks of 5).\n",
    "num_classes_per_task = [50, 5, 5, 5, 5, 5,5,5,5,5,5]\n",
    "\n",
    "batch_size = 128\n",
    "# Build the per-task datasets and data loaders.\n",
    "# task_class_orders reflects the shuffled (random) class order above.\n",
    "origin_train_set, origin_test_set, train_loaders, test_loaders,task_class_orders = create_task_datasets_and_loaders(train_dataset, test_dataset, num_classes_per_task,class_order, batch_size=batch_size, transform_train=transform_train, transform_test=transform_test)\n",
    "\n",
    "# Build the exemplar datasets/loaders from the full train/test sets,\n",
    "# presumably keeping n_samples_per_class samples per class -- verify in\n",
    "# create_exampler_datasets_and_loaders.\n",
    "n_samples_per_class = 20\n",
    "exampler_train_sets, exampler_test_sets, exampler_train_loaders, exampler_test_test_loaders = create_exampler_datasets_and_loaders(origin_train_set, origin_test_set, n_samples_per_class, batch_size, transform_train, transform_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "设置标签转换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Determine the range of original labels (i.e. the largest original label value).\n",
    "max_label = max(label_mapping.keys())\n",
    "\n",
    "# Zero-initialized lookup table covering every original label 0..max_label.\n",
    "label_mapping_array = np.zeros(max_label + 1, dtype=int)\n",
    "\n",
    "# Fill the table from the label_mapping dict: array[original] = new.\n",
    "for original_label, new_label in label_mapping.items():\n",
    "    label_mapping_array[original_label] = new_label\n",
    "\n",
    "# Move the lookup table to the training device as a PyTorch tensor.\n",
    "label_mapping_tensor = torch.from_numpy(label_mapping_array).to(device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the base multi-task model: resnet34 backbone with 512-d features.\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 1\n",
      "Incremental Task Layer added, current len: 1\n",
      "模型已从 /home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_0_0.754_20241224_084407.pth 成功加载。\n"
     ]
    }
   ],
   "source": [
    "tsk=1\n",
    "# Re-create the model and restore the weights trained on task 0.\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "\n",
    "\n",
    "full_path='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_0_0.754_20241224_084407.pth'\n",
    "# Load the task-0 checkpoint if present (NOTE(review): hard-coded absolute path).\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True))\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 2\n",
      "Incremental Task Layer added, current len: 2\n",
      "tsk: 1\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "incremental_layers.1.layer.0.weight\n",
      "incremental_layers.1.layer.0.bias\n",
      "incremental_layers.1.layer.1.weight\n",
      "incremental_layers.1.layer.1.bias\n",
      "incremental_layers.1.layer.3.weight\n",
      "incremental_layers.1.layer.3.bias\n",
      "incremental_layers.1.layer.4.weight\n",
      "incremental_layers.1.layer.4.bias\n",
      "incremental_layers.1.layer.6.weight\n",
      "incremental_layers.1.layer.6.bias\n",
      "incremental_layers.1.layer.7.weight\n",
      "incremental_layers.1.layer.7.bias\n",
      "incremental_layers.1.gate_generator.0.weight\n",
      "incremental_layers.1.gate_generator.0.bias\n",
      "incremental_layers.1.gate_generator.1.weight\n",
      "incremental_layers.1.gate_generator.1.bias\n",
      "incremental_layers.1.gate_generator.5.weight\n",
      "incremental_layers.1.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "arc_classifiers.1.4.weight\n",
      "alpha_cls: [174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      "   6.999951     6.999951     6.999951     6.999951     6.999951  ]\n",
      "alpha_tsk: [3.49998775 1.39999804]\n",
      "num_prev_task_classes: 50\n",
      "alpha_cls: [174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      "   6.999951     6.999951     6.999951     6.999951     6.999951  ]\n",
      "alpha_tsk: [228.14190917  91.2569553 ]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b6b47edb5a2c4a45a66066dd10e45653",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1680 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n",
      "Epoch 1/60 - Train Loss: 29.0445\n",
      "Per Task Accuracy: {0: 0.8285302954431647, 1: 0.48119723714504986}\n",
      "Validation Loss: 180.7740 - Validation Accuracy: 0.6800\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 18.2117\n",
      "Per Task Accuracy: {0: 0.4593515272909364, 1: 0.8976080839089281}\n",
      "Validation Loss: 206.1233 - Validation Accuracy: 0.6905\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 13.6839\n",
      "Per Task Accuracy: {0: 0.32134451677516274, 1: 0.9095676643642875}\n",
      "Validation Loss: 202.0270 - Validation Accuracy: 0.7065\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 12.8633\n",
      "Per Task Accuracy: {0: 0.35118928392588883, 1: 0.8797006907137376}\n",
      "Validation Loss: 206.2149 - Validation Accuracy: 0.7022\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 11.4554\n",
      "Per Task Accuracy: {0: 0.3127190786179269, 1: 0.9201202353543105}\n",
      "Validation Loss: 205.1585 - Validation Accuracy: 0.7075\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 10.4625\n",
      "Per Task Accuracy: {0: 0.12362293440160241, 1: 0.9784471731900742}\n",
      "Validation Loss: 219.3709 - Validation Accuracy: 0.7044\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 9.3990\n",
      "Per Task Accuracy: {0: 0.06430896344516775, 1: 0.9939882322844718}\n",
      "Validation Loss: 217.3333 - Validation Accuracy: 0.7025\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 8.2672\n",
      "Per Task Accuracy: {0: 0.06571106659989985, 1: 0.9921335379892555}\n",
      "Validation Loss: 219.6776 - Validation Accuracy: 0.7040\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 9.5422\n",
      "Per Task Accuracy: {0: 0.11319479218828242, 1: 0.9861217702737273}\n",
      "Validation Loss: 218.8466 - Validation Accuracy: 0.7053\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 8.1482\n",
      "Per Task Accuracy: {0: 0.14544316474712068, 1: 0.9780634433358916}\n",
      "Validation Loss: 218.4309 - Validation Accuracy: 0.7091\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 8.5040\n",
      "Per Task Accuracy: {0: 0.13984727090635954, 1: 0.9779994883601945}\n",
      "Validation Loss: 220.0474 - Validation Accuracy: 0.6998\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 7.4066\n",
      "Per Task Accuracy: {0: 0.12319729594391587, 1: 0.9841391660271169}\n",
      "Validation Loss: 227.6524 - Validation Accuracy: 0.6969\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 6.6887\n",
      "Per Task Accuracy: {0: 0.05409364046069104, 1: 0.9921335379892555}\n",
      "Validation Loss: 224.4830 - Validation Accuracy: 0.7007\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 6.1916\n",
      "Per Task Accuracy: {0: 0.04768402603905859, 1: 0.9960347915067792}\n",
      "Validation Loss: 227.3206 - Validation Accuracy: 0.7031\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 6.1389\n",
      "Per Task Accuracy: {0: 0.06290686029043566, 1: 0.9861857252494244}\n",
      "Validation Loss: 236.7174 - Validation Accuracy: 0.6976\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 5.6287\n",
      "Per Task Accuracy: {0: 0.07774161241862794, 1: 0.9605397799948836}\n",
      "Validation Loss: 238.9776 - Validation Accuracy: 0.6969\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 5.7921\n",
      "Per Task Accuracy: {0: 0.045080120180270404, 1: 0.9863775901765157}\n",
      "Validation Loss: 239.9343 - Validation Accuracy: 0.6985\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 5.6242\n",
      "Per Task Accuracy: {0: 0.031246870305458186, 1: 0.9882962394474291}\n",
      "Validation Loss: 240.5744 - Validation Accuracy: 0.6958\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 5.8962\n",
      "Per Task Accuracy: {0: 0.013620430645968953, 1: 0.9980173957533897}\n",
      "Validation Loss: 238.9719 - Validation Accuracy: 0.6882\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 5.4234\n",
      "Per Task Accuracy: {0: 0.007411116675012519, 1: 0.9980173957533897}\n",
      "Validation Loss: 251.4799 - Validation Accuracy: 0.6920\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 4.8740\n",
      "Per Task Accuracy: {0: 0.006009013520280421, 1: 0.9980173957533897}\n",
      "Validation Loss: 238.2625 - Validation Accuracy: 0.6969\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 4.9769\n",
      "Per Task Accuracy: {0: 0.005007511266900351, 1: 0.9980173957533897}\n",
      "Validation Loss: 243.7310 - Validation Accuracy: 0.6949\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 4.0475\n",
      "Per Task Accuracy: {0: 0.005207811717576364, 1: 0.9980173957533897}\n",
      "Validation Loss: 252.3857 - Validation Accuracy: 0.6898\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 4.8222\n",
      "Per Task Accuracy: {0: 0.005207811717576364, 1: 0.9980173957533897}\n",
      "Validation Loss: 249.5862 - Validation Accuracy: 0.6920\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 4.1051\n",
      "Per Task Accuracy: {0: 0.005007511266900351, 1: 0.9980173957533897}\n",
      "Validation Loss: 253.9356 - Validation Accuracy: 0.6942\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 3.8130\n",
      "Per Task Accuracy: {0: 0.006009013520280421, 1: 0.9980173957533897}\n",
      "Validation Loss: 254.1162 - Validation Accuracy: 0.6929\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 3.9178\n",
      "Per Task Accuracy: {0: 0.0058087130696044065, 1: 0.9980173957533897}\n",
      "Validation Loss: 260.7882 - Validation Accuracy: 0.6931\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 3.9100\n",
      "Per Task Accuracy: {0: 0.006409614421632449, 1: 0.9980173957533897}\n",
      "Validation Loss: 252.5806 - Validation Accuracy: 0.6893\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 4.8186\n",
      "Per Task Accuracy: {0: 0.007010515773660491, 1: 0.9980173957533897}\n",
      "Validation Loss: 252.4435 - Validation Accuracy: 0.6847\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 4.2000\n",
      "Per Task Accuracy: {0: 0.007611417125688533, 1: 0.9980173957533897}\n",
      "Validation Loss: 251.8661 - Validation Accuracy: 0.6915\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 3.4119\n",
      "Per Task Accuracy: {0: 0.009013520280420632, 1: 0.9980173957533897}\n",
      "Validation Loss: 249.1541 - Validation Accuracy: 0.6915\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 2.9059\n",
      "Per Task Accuracy: {0: 0.010615923885828743, 1: 0.9980173957533897}\n",
      "Validation Loss: 269.3800 - Validation Accuracy: 0.6820\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 3.6155\n",
      "Per Task Accuracy: {0: 0.012018027040560842, 1: 0.9980173957533897}\n",
      "Validation Loss: 258.4694 - Validation Accuracy: 0.6904\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 3.7715\n",
      "Per Task Accuracy: {0: 0.012018027040560842, 1: 0.9980173957533897}\n",
      "Validation Loss: 260.3982 - Validation Accuracy: 0.6844\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 3.1886\n",
      "Per Task Accuracy: {0: 0.012618928392588883, 1: 0.9980173957533897}\n",
      "Validation Loss: 274.0765 - Validation Accuracy: 0.6784\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 3.1704\n",
      "Per Task Accuracy: {0: 0.017626439659489235, 1: 0.9900869787669481}\n",
      "Validation Loss: 266.5185 - Validation Accuracy: 0.6840\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 3.4966\n",
      "Per Task Accuracy: {0: 0.01782674011016525, 1: 0.9920056280378613}\n",
      "Validation Loss: 269.4712 - Validation Accuracy: 0.6793\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 3.0859\n",
      "Per Task Accuracy: {0: 0.02244616925388082, 1: 0.995906881555385}\n",
      "Validation Loss: 275.3938 - Validation Accuracy: 0.6818\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 2.8286\n",
      "Per Task Accuracy: {0: 0.016236855282924387, 1: 0.995906881555385}\n",
      "Validation Loss: 256.3539 - Validation Accuracy: 0.6904\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 2.5235\n",
      "Per Task Accuracy: {0: 0.01042814221331998, 1: 0.9960347915067792}\n",
      "Validation Loss: 275.1105 - Validation Accuracy: 0.6762\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n",
      "Epoch 41/60 - Train Loss: 2.4681\n",
      "Per Task Accuracy: {0: 0.008212318477716574, 1: 1.0}\n",
      "Validation Loss: 276.2794 - Validation Accuracy: 0.6725\n",
      "---------------\n",
      "Epoch 42/60\n",
      "---------------\n",
      "Epoch 42/60 - Train Loss: 3.1236\n",
      "Per Task Accuracy: {0: 0.009414121181772659, 1: 0.9960347915067792}\n",
      "Validation Loss: 261.6365 - Validation Accuracy: 0.6875\n",
      "---------------\n",
      "Epoch 43/60\n",
      "---------------\n",
      "Epoch 43/60 - Train Loss: 2.7314\n",
      "Per Task Accuracy: {0: 0.010215322984476714, 1: 0.9939882322844718}\n",
      "Validation Loss: 265.9516 - Validation Accuracy: 0.6822\n",
      "---------------\n",
      "Epoch 44/60\n",
      "---------------\n",
      "Epoch 44/60 - Train Loss: 2.8009\n",
      "Per Task Accuracy: {0: 0.009414121181772659, 1: 0.9979534407776925}\n",
      "Validation Loss: 274.6669 - Validation Accuracy: 0.6785\n",
      "---------------\n",
      "Epoch 45/60\n",
      "---------------\n",
      "Epoch 45/60 - Train Loss: 2.4067\n",
      "Per Task Accuracy: {0: 0.013219829744616926, 1: 0.9920056280378613}\n",
      "Validation Loss: 274.3214 - Validation Accuracy: 0.6816\n",
      "---------------\n",
      "Epoch 46/60\n",
      "---------------\n",
      "Epoch 46/60 - Train Loss: 2.4837\n",
      "Per Task Accuracy: {0: 0.013019529293940912, 1: 0.9939242773087746}\n",
      "Validation Loss: 272.7055 - Validation Accuracy: 0.6833\n",
      "---------------\n",
      "Epoch 47/60\n",
      "---------------\n",
      "Epoch 47/60 - Train Loss: 2.9910\n",
      "Per Task Accuracy: {0: 0.010215322984476714, 1: 0.9939242773087746}\n",
      "Validation Loss: 276.5591 - Validation Accuracy: 0.6787\n",
      "---------------\n",
      "Epoch 48/60\n",
      "---------------\n",
      "Epoch 48/60 - Train Loss: 2.3761\n",
      "Per Task Accuracy: {0: 0.00200300450676014, 1: 0.9959708365310821}\n",
      "Validation Loss: 269.9784 - Validation Accuracy: 0.6869\n",
      "---------------\n",
      "Epoch 49/60\n",
      "---------------\n",
      "Epoch 49/60 - Train Loss: 3.4711\n",
      "Per Task Accuracy: {0: 0.0024036054081121683, 1: 0.9959708365310821}\n",
      "Validation Loss: 281.2293 - Validation Accuracy: 0.6729\n",
      "---------------\n",
      "Epoch 50/60\n",
      "---------------\n",
      "Epoch 50/60 - Train Loss: 3.0566\n",
      "Per Task Accuracy: {0: 0.0032048072108162243, 1: 1.0}\n",
      "Validation Loss: 269.4981 - Validation Accuracy: 0.6804\n",
      "---------------\n",
      "Epoch 51/60\n",
      "---------------\n",
      "Epoch 51/60 - Train Loss: 2.8902\n",
      "Per Task Accuracy: {0: 0.0038057085628442665, 1: 0.9979534407776925}\n",
      "Validation Loss: 258.1284 - Validation Accuracy: 0.6876\n",
      "---------------\n",
      "Epoch 52/60\n",
      "---------------\n",
      "Epoch 52/60 - Train Loss: 2.7191\n",
      "Per Task Accuracy: {0: 0.014021031547320982, 1: 0.9861217702737273}\n",
      "Validation Loss: 276.9149 - Validation Accuracy: 0.6787\n",
      "---------------\n",
      "Epoch 53/60\n",
      "---------------\n",
      "Epoch 53/60 - Train Loss: 2.2558\n",
      "Per Task Accuracy: {0: 0.020030045067601403, 1: 0.9782553082629829}\n",
      "Validation Loss: 272.1727 - Validation Accuracy: 0.6847\n",
      "---------------\n",
      "Epoch 54/60\n",
      "---------------\n",
      "Epoch 54/60 - Train Loss: 2.4219\n",
      "Per Task Accuracy: {0: 0.011817726589884827, 1: 0.9980813507290868}\n",
      "Validation Loss: 276.4154 - Validation Accuracy: 0.6749\n",
      "---------------\n",
      "Epoch 55/60\n",
      "---------------\n",
      "Epoch 55/60 - Train Loss: 2.4254\n",
      "Per Task Accuracy: {0: 0.012418627941912869, 1: 0.9980813507290868}\n",
      "Validation Loss: 295.8155 - Validation Accuracy: 0.6651\n",
      "---------------\n",
      "Epoch 56/60\n",
      "---------------\n",
      "Epoch 56/60 - Train Loss: 1.8767\n",
      "Per Task Accuracy: {0: 0.013620430645968953, 1: 0.9980813507290868}\n",
      "Validation Loss: 283.2042 - Validation Accuracy: 0.6780\n",
      "---------------\n",
      "Epoch 57/60\n",
      "---------------\n",
      "Epoch 57/60 - Train Loss: 2.4543\n",
      "Per Task Accuracy: {0: 0.014421632448673009, 1: 0.9980813507290868}\n",
      "Validation Loss: 269.9842 - Validation Accuracy: 0.6864\n",
      "---------------\n",
      "Epoch 58/60\n",
      "---------------\n",
      "Epoch 58/60 - Train Loss: 1.9436\n",
      "Per Task Accuracy: {0: 0.009814722083124687, 1: 0.9960347915067792}\n",
      "Validation Loss: 283.5845 - Validation Accuracy: 0.6778\n",
      "---------------\n",
      "Epoch 59/60\n",
      "---------------\n",
      "Epoch 59/60 - Train Loss: 1.9866\n",
      "Per Task Accuracy: {0: 0.010615923885828743, 1: 0.9960347915067792}\n",
      "Validation Loss: 287.0885 - Validation Accuracy: 0.6760\n",
      "---------------\n",
      "Epoch 60/60\n",
      "---------------\n",
      "Epoch 60/60 - Train Loss: 1.9764\n",
      "Per Task Accuracy: {0: 0.009213820731096644, 1: 0.9980813507290868}\n",
      "Validation Loss: 287.9007 - Validation Accuracy: 0.6765\n",
      "Loss on test set: 288.1056\n",
      "Accuracy on test set: 0.6765\n",
      "Incremental ARC Classifier added, current len: 3\n",
      "Incremental Task Layer added, current len: 3\n",
      "tsk: 2\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0', 'incremental_layers.1', 'arc_classifiers.0.4', 'arc_classifiers.0.4']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "----Freezing incremental_layers.1.gate_generator.0.weight\n",
      "----Freezing incremental_layers.1.gate_generator.0.bias\n",
      "----Freezing incremental_layers.1.gate_generator.1.weight\n",
      "----Freezing incremental_layers.1.gate_generator.1.bias\n",
      "----Freezing incremental_layers.1.gate_generator.5.weight\n",
      "----Freezing incremental_layers.1.gate_generator.5.bias\n",
      "incremental_layers.2.layer.0.weight\n",
      "incremental_layers.2.layer.0.bias\n",
      "incremental_layers.2.layer.1.weight\n",
      "incremental_layers.2.layer.1.bias\n",
      "incremental_layers.2.layer.3.weight\n",
      "incremental_layers.2.layer.3.bias\n",
      "incremental_layers.2.layer.4.weight\n",
      "incremental_layers.2.layer.4.bias\n",
      "incremental_layers.2.layer.6.weight\n",
      "incremental_layers.2.layer.6.bias\n",
      "incremental_layers.2.layer.7.weight\n",
      "incremental_layers.2.layer.7.bias\n",
      "incremental_layers.2.gate_generator.0.weight\n",
      "incremental_layers.2.gate_generator.0.bias\n",
      "incremental_layers.2.gate_generator.1.weight\n",
      "incremental_layers.2.gate_generator.1.bias\n",
      "incremental_layers.2.gate_generator.5.weight\n",
      "incremental_layers.2.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "arc_classifiers.2.4.weight\n",
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 3.59998704 35.99870405  1.43999793]\n",
      "num_prev_task_classes: 55\n",
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 43.57236687 435.7095518   17.4289844 ]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a5e19a8b92a64f0c91ed9a4cfc6261c0",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1740 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n",
      "Epoch 1/60 - Train Loss: 21.1804\n",
      "Per Task Accuracy: {0: 0.0030045067601402104, 1: 0.13059606037349705, 2: 0.7801400051853772}\n",
      "Validation Loss: 312.0152 - Validation Accuracy: 0.6153\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 15.2930\n",
      "Per Task Accuracy: {0: 0.005207811717576364, 1: 0.09938603223330775, 2: 0.7482499351827846}\n",
      "Validation Loss: 320.4389 - Validation Accuracy: 0.6123\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 12.4578\n",
      "Per Task Accuracy: {0: 0.004606910365548322, 1: 0.047838321821437706, 2: 0.8159191081151154}\n",
      "Validation Loss: 322.8723 - Validation Accuracy: 0.6145\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 11.0054\n",
      "Per Task Accuracy: {0: 0.0030045067601402104, 1: 0.035622921463289846, 2: 0.8747731397459165}\n",
      "Validation Loss: 325.0503 - Validation Accuracy: 0.6170\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 11.2472\n",
      "Per Task Accuracy: {0: 0.0032048072108162243, 1: 0.07975185469429522, 2: 0.8120300751879699}\n",
      "Validation Loss: 326.0848 - Validation Accuracy: 0.6197\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 10.1613\n",
      "Per Task Accuracy: {0: 0.0008012018027040561, 1: 0.08960092095165004, 2: 0.8882551205600208}\n",
      "Validation Loss: 319.5705 - Validation Accuracy: 0.6302\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 10.8100\n",
      "Per Task Accuracy: {0: 0.00200300450676014, 1: 0.24449987209004861, 2: 0.7652968628467721}\n",
      "Validation Loss: 316.7170 - Validation Accuracy: 0.6277\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 10.0283\n",
      "Per Task Accuracy: {0: 0.00100150225338007, 1: 0.11377590176515733, 2: 0.8726341716359866}\n",
      "Validation Loss: 333.4489 - Validation Accuracy: 0.6170\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 11.7326\n",
      "Per Task Accuracy: {0: 0.0016024036054081122, 1: 0.062036326426195956, 2: 0.878662172673062}\n",
      "Validation Loss: 319.4467 - Validation Accuracy: 0.6247\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 10.0656\n",
      "Per Task Accuracy: {0: 0.0016024036054081122, 1: 0.059797902276797134, 2: 0.9204044594244232}\n",
      "Validation Loss: 326.3003 - Validation Accuracy: 0.6210\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 9.0618\n",
      "Per Task Accuracy: {0: 0.00200300450676014, 1: 0.13174724993604503, 2: 0.9040705211304122}\n",
      "Validation Loss: 333.9734 - Validation Accuracy: 0.6183\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 8.5621\n",
      "Per Task Accuracy: {0: 0.001402103154732098, 1: 0.017843438219493475, 2: 0.933367902514908}\n",
      "Validation Loss: 324.9887 - Validation Accuracy: 0.6243\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 8.4614\n",
      "Per Task Accuracy: {0: 0.0024036054081121683, 1: 0.05570478383218214, 2: 0.8768472906403941}\n",
      "Validation Loss: 331.8400 - Validation Accuracy: 0.6210\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 7.9321\n",
      "Per Task Accuracy: {0: 0.00020030045067601402, 1: 0.023791250959324637, 2: 0.9896292455276121}\n",
      "Validation Loss: 330.9586 - Validation Accuracy: 0.6200\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 8.5266\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.004029163468917882, 2: 1.0}\n",
      "Validation Loss: 332.1624 - Validation Accuracy: 0.6202\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 11.1160\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.037989255564082884, 2: 0.9937775473165673}\n",
      "Validation Loss: 326.4566 - Validation Accuracy: 0.6195\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 9.0482\n",
      "Per Task Accuracy: {0: 0.00040060090135202804, 1: 0.08960092095165004, 2: 0.9813326419497018}\n",
      "Validation Loss: 315.7644 - Validation Accuracy: 0.6325\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 11.8854\n",
      "Per Task Accuracy: {0: 0.00100150225338007, 1: 0.10168841135840369, 2: 0.9385532797511019}\n",
      "Validation Loss: 317.0156 - Validation Accuracy: 0.6262\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 9.0274\n",
      "Per Task Accuracy: {0: 0.005608412618928392, 1: 0.22384241493988233, 2: 0.7720378532538242}\n",
      "Validation Loss: 317.2206 - Validation Accuracy: 0.6260\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 7.7656\n",
      "Per Task Accuracy: {0: 0.004406609914872309, 1: 0.08416474801739575, 2: 0.8060668913663469}\n",
      "Validation Loss: 332.5477 - Validation Accuracy: 0.6227\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 7.7384\n",
      "Per Task Accuracy: {0: 0.001802704056084126, 1: 0.07591455615246866, 2: 0.809437386569873}\n",
      "Validation Loss: 332.8305 - Validation Accuracy: 0.6190\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 7.8709\n",
      "Per Task Accuracy: {0: 0.00020030045067601402, 1: 0.1459452545408033, 2: 0.878662172673062}\n",
      "Validation Loss: 337.4758 - Validation Accuracy: 0.6233\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 7.3473\n",
      "Per Task Accuracy: {0: 0.00020030045067601402, 1: 0.21987720644666156, 2: 0.8278454757583614}\n",
      "Validation Loss: 339.3621 - Validation Accuracy: 0.6242\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 7.0336\n",
      "Per Task Accuracy: {0: 0.00020030045067601402, 1: 0.030314658480429777, 2: 0.8957739175525019}\n",
      "Validation Loss: 341.2401 - Validation Accuracy: 0.6220\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 7.0089\n",
      "Per Task Accuracy: {0: 0.0022033049574361543, 1: 0.05199539524174981, 2: 0.8633653098262899}\n",
      "Validation Loss: 334.9457 - Validation Accuracy: 0.6240\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 7.1070\n",
      "Per Task Accuracy: {0: 0.005408112168252379, 1: 0.1724226144794065, 2: 0.795696136893959}\n",
      "Validation Loss: 338.3996 - Validation Accuracy: 0.6173\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 6.9253\n",
      "Per Task Accuracy: {0: 0.005608412618928392, 1: 0.12356101304681504, 2: 0.7757324345346124}\n",
      "Validation Loss: 336.7330 - Validation Accuracy: 0.6242\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 7.8799\n",
      "Per Task Accuracy: {0: 0.004206309464196294, 1: 0.0858915323612177, 2: 0.7770287788436608}\n",
      "Validation Loss: 341.6586 - Validation Accuracy: 0.6183\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 7.5517\n",
      "Per Task Accuracy: {0: 0.0008012018027040561, 1: 0.017907393195190587, 2: 0.9128856624319419}\n",
      "Validation Loss: 332.0063 - Validation Accuracy: 0.6222\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 6.8589\n",
      "Per Task Accuracy: {0: 0.0024036054081121683, 1: 0.028012279355333843, 2: 0.8208452164894996}\n",
      "Validation Loss: 326.7808 - Validation Accuracy: 0.6287\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 6.5995\n",
      "Per Task Accuracy: {0: 0.0012018027040560841, 1: 0.08787413660782808, 2: 0.8205859476276899}\n",
      "Validation Loss: 336.6057 - Validation Accuracy: 0.6250\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 6.2839\n",
      "Per Task Accuracy: {0: 0.00100150225338007, 1: 0.1578408800204656, 2: 0.8322530464091262}\n",
      "Validation Loss: 332.6970 - Validation Accuracy: 0.6262\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 6.7671\n",
      "Per Task Accuracy: {0: 0.003605408112168252, 1: 0.1780506523407521, 2: 0.7762509722582318}\n",
      "Validation Loss: 344.6387 - Validation Accuracy: 0.6172\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 6.6819\n",
      "Per Task Accuracy: {0: 0.0024036054081121683, 1: 0.13200306983883345, 2: 0.7337308789214415}\n",
      "Validation Loss: 347.5388 - Validation Accuracy: 0.6120\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 6.3142\n",
      "Per Task Accuracy: {0: 0.0008012018027040561, 1: 0.06414684062420056, 2: 0.7783251231527094}\n",
      "Validation Loss: 329.7618 - Validation Accuracy: 0.6280\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 9.1162\n",
      "Per Task Accuracy: {0: 0.0008012018027040561, 1: 0.17958557175748274, 2: 0.7658802177858439}\n",
      "Validation Loss: 332.7270 - Validation Accuracy: 0.6260\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 6.9096\n",
      "Per Task Accuracy: {0: 0.005207811717576364, 1: 0.18195190585827578, 2: 0.7093596059113301}\n",
      "Validation Loss: 333.8572 - Validation Accuracy: 0.6175\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 9.2792\n",
      "Per Task Accuracy: {0: 0.0022033049574361543, 1: 0.16155026861089794, 2: 0.7163598651801919}\n",
      "Validation Loss: 331.3070 - Validation Accuracy: 0.6268\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 7.4758\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.07418777180864672, 2: 0.7848068446979518}\n",
      "Validation Loss: 320.0435 - Validation Accuracy: 0.6302\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 7.3217\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.13814274750575595, 2: 0.7892144153487166}\n",
      "Validation Loss: 330.6244 - Validation Accuracy: 0.6227\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n",
      "Epoch 41/60 - Train Loss: 6.5761\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.08800204655922231, 2: 0.8633653098262899}\n",
      "Validation Loss: 334.9856 - Validation Accuracy: 0.6237\n",
      "---------------\n",
      "Epoch 42/60\n",
      "---------------\n",
      "Epoch 42/60 - Train Loss: 7.2343\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.28837298541826556, 2: 0.6156987295825771}\n",
      "Validation Loss: 333.3120 - Validation Accuracy: 0.6243\n",
      "---------------\n",
      "Epoch 43/60\n",
      "---------------\n",
      "Epoch 43/60 - Train Loss: 6.6319\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.2084932207725761, 2: 0.71577651024112}\n",
      "Validation Loss: 339.6859 - Validation Accuracy: 0.6197\n",
      "---------------\n",
      "Epoch 44/60\n",
      "---------------\n",
      "Epoch 44/60 - Train Loss: 7.1727\n",
      "Per Task Accuracy: {0: 0.00100150225338007, 1: 0.14031721667945765, 2: 0.6865439460720767}\n",
      "Validation Loss: 337.0789 - Validation Accuracy: 0.6252\n",
      "---------------\n",
      "Epoch 45/60\n",
      "---------------\n",
      "Epoch 45/60 - Train Loss: 6.3638\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.08819391148631364, 2: 0.8312159709618875}\n",
      "Validation Loss: 335.1410 - Validation Accuracy: 0.6243\n",
      "---------------\n",
      "Epoch 46/60\n",
      "---------------\n",
      "Epoch 46/60 - Train Loss: 7.0765\n",
      "Per Task Accuracy: {0: 0.0006009013520280421, 1: 0.24814530570478383, 2: 0.7240731138190304}\n",
      "Validation Loss: 330.8246 - Validation Accuracy: 0.6253\n",
      "---------------\n",
      "Epoch 47/60\n",
      "---------------\n",
      "Epoch 47/60 - Train Loss: 6.4971\n",
      "Per Task Accuracy: {0: 0.0006009013520280421, 1: 0.18176004093118445, 2: 0.7194710915219082}\n",
      "Validation Loss: 331.4046 - Validation Accuracy: 0.6255\n",
      "---------------\n",
      "Epoch 48/60\n",
      "---------------\n",
      "Epoch 48/60 - Train Loss: 5.7727\n",
      "Per Task Accuracy: {0: 0.0012018027040560841, 1: 0.10782808902532617, 2: 0.7459165154264973}\n",
      "Validation Loss: 344.3943 - Validation Accuracy: 0.6150\n",
      "---------------\n",
      "Epoch 49/60\n",
      "---------------\n",
      "Epoch 49/60 - Train Loss: 5.7165\n",
      "Per Task Accuracy: {0: 0.001802704056084126, 1: 0.20209772320286518, 2: 0.6626912107855847}\n",
      "Validation Loss: 338.9789 - Validation Accuracy: 0.6168\n",
      "---------------\n",
      "Epoch 50/60\n",
      "---------------\n",
      "Epoch 50/60 - Train Loss: 5.3172\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.07591455615246866, 2: 0.8177339901477833}\n",
      "Validation Loss: 355.1164 - Validation Accuracy: 0.6065\n",
      "---------------\n",
      "Epoch 51/60\n",
      "---------------\n",
      "Epoch 51/60 - Train Loss: 5.4011\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.14191609107188538, 2: 0.800881514130153}\n",
      "Validation Loss: 341.0810 - Validation Accuracy: 0.6223\n",
      "---------------\n",
      "Epoch 52/60\n",
      "---------------\n",
      "Epoch 52/60 - Train Loss: 6.6574\n",
      "Per Task Accuracy: {0: 0.0008012018027040561, 1: 0.34235098490662574, 2: 0.6076613948664765}\n",
      "Validation Loss: 339.0132 - Validation Accuracy: 0.6182\n",
      "---------------\n",
      "Epoch 53/60\n",
      "---------------\n",
      "Epoch 53/60 - Train Loss: 5.2800\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.3083908928114607, 2: 0.6489499611096707}\n",
      "Validation Loss: 347.1096 - Validation Accuracy: 0.6150\n",
      "---------------\n",
      "Epoch 54/60\n",
      "---------------\n",
      "Epoch 54/60 - Train Loss: 5.7909\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.08627526221540036, 2: 0.7596577651024112}\n",
      "Validation Loss: 336.6987 - Validation Accuracy: 0.6210\n",
      "---------------\n",
      "Epoch 55/60\n",
      "---------------\n",
      "Epoch 55/60 - Train Loss: 6.4509\n",
      "Per Task Accuracy: {0: 0.00040060090135202804, 1: 0.3814914300332566, 2: 0.5001944516463572}\n",
      "Validation Loss: 345.1424 - Validation Accuracy: 0.6150\n",
      "---------------\n",
      "Epoch 56/60\n",
      "---------------\n",
      "Epoch 56/60 - Train Loss: 6.0098\n",
      "Per Task Accuracy: {0: 0.00200300450676014, 1: 0.394602200051164, 2: 0.4494425719471091}\n",
      "Validation Loss: 344.8414 - Validation Accuracy: 0.6167\n",
      "---------------\n",
      "Epoch 57/60\n",
      "---------------\n",
      "Epoch 57/60 - Train Loss: 7.8096\n",
      "Per Task Accuracy: {0: 0.00040060090135202804, 1: 0.48656945510360705, 2: 0.4204044594244231}\n",
      "Validation Loss: 339.4262 - Validation Accuracy: 0.6182\n",
      "---------------\n",
      "Epoch 58/60\n",
      "---------------\n",
      "Epoch 58/60 - Train Loss: 7.9617\n",
      "Per Task Accuracy: {0: 0.006009013520280421, 1: 0.7821693527756459, 2: 0.19840549649987035}\n",
      "Validation Loss: 350.5711 - Validation Accuracy: 0.6093\n",
      "---------------\n",
      "Epoch 59/60\n",
      "---------------\n",
      "Epoch 59/60 - Train Loss: 5.9053\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.3177283192632387, 2: 0.5322141560798548}\n",
      "Validation Loss: 352.1478 - Validation Accuracy: 0.6082\n",
      "---------------\n",
      "Epoch 60/60\n",
      "---------------\n",
      "Epoch 60/60 - Train Loss: 6.4257\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.2375287797390637, 2: 0.6035130930775214}\n",
      "Validation Loss: 342.2606 - Validation Accuracy: 0.6180\n",
      "Loss on test set: 343.1595\n",
      "Accuracy on test set: 0.6180\n",
      "Incremental ARC Classifier added, current len: 4\n",
      "Incremental Task Layer added, current len: 4\n",
      "tsk: 3\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0', 'incremental_layers.1', 'incremental_layers.2', 'arc_classifiers.0.4', 'arc_classifiers.1.4', 'arc_classifiers.1.4']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "----Freezing incremental_layers.1.gate_generator.0.weight\n",
      "----Freezing incremental_layers.1.gate_generator.0.bias\n",
      "----Freezing incremental_layers.1.gate_generator.1.weight\n",
      "----Freezing incremental_layers.1.gate_generator.1.bias\n",
      "----Freezing incremental_layers.1.gate_generator.5.weight\n",
      "----Freezing incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "----Freezing incremental_layers.2.gate_generator.0.weight\n",
      "----Freezing incremental_layers.2.gate_generator.0.bias\n",
      "----Freezing incremental_layers.2.gate_generator.1.weight\n",
      "----Freezing incremental_layers.2.gate_generator.1.bias\n",
      "----Freezing incremental_layers.2.gate_generator.5.weight\n",
      "----Freezing incremental_layers.2.gate_generator.5.bias\n",
      "incremental_layers.3.layer.0.weight\n",
      "incremental_layers.3.layer.0.bias\n",
      "incremental_layers.3.layer.1.weight\n",
      "incremental_layers.3.layer.1.bias\n",
      "incremental_layers.3.layer.3.weight\n",
      "incremental_layers.3.layer.3.bias\n",
      "incremental_layers.3.layer.4.weight\n",
      "incremental_layers.3.layer.4.bias\n",
      "incremental_layers.3.layer.6.weight\n",
      "incremental_layers.3.layer.6.bias\n",
      "incremental_layers.3.layer.7.weight\n",
      "incremental_layers.3.layer.7.bias\n",
      "incremental_layers.3.gate_generator.0.weight\n",
      "incremental_layers.3.gate_generator.0.bias\n",
      "incremental_layers.3.gate_generator.1.weight\n",
      "incremental_layers.3.gate_generator.1.bias\n",
      "incremental_layers.3.gate_generator.5.weight\n",
      "incremental_layers.3.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "arc_classifiers.2.4.weight\n",
      "arc_classifiers.3.2.weight\n",
      "arc_classifiers.3.2.bias\n",
      "arc_classifiers.3.4.weight\n",
      "alpha_cls: [184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      "   7.39994524   7.39994524   7.39994524   7.39994524   7.39994524]\n",
      "alpha_tsk: [ 3.69998631 36.99863105 36.99863105  1.47999781]\n",
      "num_prev_task_classes: 60\n",
      "alpha_cls: [184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      "   7.39994524   7.39994524   7.39994524   7.39994524   7.39994524]\n",
      "alpha_tsk: [ 32.02097104 320.19904778 320.19904778  12.80841685]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e863f34070a64fa792d89793da6f60ca",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1740 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n",
      "Epoch 1/60 - Train Loss: 26.3066\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06766436428754156, 2: 0.08322530464091263, 3: 0.7508753029894963}\n",
      "Validation Loss: 359.7096 - Validation Accuracy: 0.5622\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 20.4387\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.05973394730110002, 2: 0.0850401866735805, 3: 0.7304740102343119}\n",
      "Validation Loss: 359.1737 - Validation Accuracy: 0.5703\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 19.1099\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03581478639038117, 2: 0.07907700285195748, 3: 0.7096687314839752}\n",
      "Validation Loss: 364.2541 - Validation Accuracy: 0.5663\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 17.7359\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.037733435661294445, 2: 0.09100337049520353, 3: 0.7122946404524643}\n",
      "Validation Loss: 368.4592 - Validation Accuracy: 0.5694\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 16.2599\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03766948068559734, 2: 0.07129893699766658, 3: 0.7554538109345542}\n",
      "Validation Loss: 373.3450 - Validation Accuracy: 0.5689\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 16.3463\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0678562292146329, 2: 0.1236712470832253, 3: 0.69687584163749}\n",
      "Validation Loss: 367.9674 - Validation Accuracy: 0.5711\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 15.0200\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.11383985674085444, 2: 0.1319678506611356, 3: 0.6338540263937517}\n",
      "Validation Loss: 363.6861 - Validation Accuracy: 0.5757\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 15.0323\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.10367101560501407, 2: 0.1335234638319938, 3: 0.6595071370859144}\n",
      "Validation Loss: 374.4665 - Validation Accuracy: 0.5680\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 14.1533\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06548989511383986, 2: 0.09333679025149079, 3: 0.7249528683005656}\n",
      "Validation Loss: 369.7001 - Validation Accuracy: 0.5774\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 13.2329\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.053786134561268864, 2: 0.08115115374643506, 3: 0.7842041475895503}\n",
      "Validation Loss: 366.5816 - Validation Accuracy: 0.5837\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 13.4239\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.075530826298286, 2: 0.07544723878662173, 3: 0.7683813627794236}\n",
      "Validation Loss: 372.0853 - Validation Accuracy: 0.5755\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 13.9310\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.13775901765157328, 2: 0.08400311122634171, 3: 0.7446808510638298}\n",
      "Validation Loss: 374.1821 - Validation Accuracy: 0.5771\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 13.2176\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.09574059861857252, 2: 0.09929997407311382, 3: 0.7219229733369243}\n",
      "Validation Loss: 373.9013 - Validation Accuracy: 0.5772\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 13.9020\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06158864159631619, 2: 0.11563391236712471, 3: 0.6782924858604902}\n",
      "Validation Loss: 377.9554 - Validation Accuracy: 0.5726\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 13.9979\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.07546687132258889, 2: 0.08944775732434534, 3: 0.7448828440614059}\n",
      "Validation Loss: 377.2056 - Validation Accuracy: 0.5740\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 13.1853\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.12579943719621386, 2: 0.11330049261083744, 3: 0.6821303528144358}\n",
      "Validation Loss: 374.8225 - Validation Accuracy: 0.5783\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 12.8755\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.1274622665643387, 2: 0.10759657765102411, 3: 0.6919606786964718}\n",
      "Validation Loss: 367.3105 - Validation Accuracy: 0.5849\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 12.8807\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06753645433614736, 2: 0.08659579984443869, 3: 0.8494478858066253}\n",
      "Validation Loss: 373.3243 - Validation Accuracy: 0.5782\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 11.8111\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.04521616781785623, 2: 0.05963183821623023, 3: 0.8007675733907891}\n",
      "Validation Loss: 368.7490 - Validation Accuracy: 0.5885\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 12.7091\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06318751598874392, 2: 0.07363235675395385, 3: 0.7046862375437651}\n",
      "Validation Loss: 371.3203 - Validation Accuracy: 0.5805\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 12.9277\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.08953696597595293, 2: 0.10189266269121079, 3: 0.6488015082143819}\n",
      "Validation Loss: 382.5952 - Validation Accuracy: 0.5686\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 13.4282\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.057879253005883854, 2: 0.0852994555353902, 3: 0.7842714785887422}\n",
      "Validation Loss: 368.6251 - Validation Accuracy: 0.5806\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 12.1029\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03549501151189562, 2: 0.05729841845994296, 3: 0.7993536224077565}\n",
      "Validation Loss: 375.8635 - Validation Accuracy: 0.5769\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 11.0949\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.1017523663341008, 2: 0.10707803992740472, 3: 0.7213843253433881}\n",
      "Validation Loss: 374.8739 - Validation Accuracy: 0.5848\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 11.3337\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.09778715784088002, 2: 0.06740990407052114, 3: 0.8132911392405063}\n",
      "Validation Loss: 370.9151 - Validation Accuracy: 0.5862\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 11.3045\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.07949603479150678, 2: 0.08970702618615504, 3: 0.7173444653918665}\n",
      "Validation Loss: 365.1507 - Validation Accuracy: 0.5863\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 11.5576\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.1378229726272704, 2: 0.16100596318382163, 3: 0.6213977915432265}\n",
      "Validation Loss: 369.4498 - Validation Accuracy: 0.5865\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 11.6127\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.037733435661294445, 2: 0.09022556390977443, 3: 0.8161863722057635}\n",
      "Validation Loss: 373.8038 - Validation Accuracy: 0.5882\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 11.4154\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0574955231517012, 2: 0.1617837697692507, 3: 0.6185698895771613}\n",
      "Validation Loss: 369.1715 - Validation Accuracy: 0.5832\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 11.7714\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.05148375543617294, 2: 0.08374384236453201, 3: 0.8142337732291948}\n",
      "Validation Loss: 367.5873 - Validation Accuracy: 0.5846\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 10.7157\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.05730365822460987, 2: 0.07155820585947628, 3: 0.7583490438998115}\n",
      "Validation Loss: 370.3082 - Validation Accuracy: 0.5845\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 11.2272\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06951905858275774, 2: 0.10992999740731138, 3: 0.692431995690816}\n",
      "Validation Loss: 377.7209 - Validation Accuracy: 0.5788\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 10.8070\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.05359426963417754, 2: 0.10759657765102411, 3: 0.7106786964718557}\n",
      "Validation Loss: 369.3301 - Validation Accuracy: 0.5865\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 10.2438\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.049501151189562546, 2: 0.06948405496499871, 3: 0.7640721788311339}\n",
      "Validation Loss: 377.9776 - Validation Accuracy: 0.5766\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 10.7661\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.1177411102583781, 2: 0.14830178895514648, 3: 0.5869243199569082}\n",
      "Validation Loss: 378.6497 - Validation Accuracy: 0.5802\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 10.3727\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.11748529035558966, 2: 0.13741249675913922, 3: 0.5859816859682198}\n",
      "Validation Loss: 370.4630 - Validation Accuracy: 0.5885\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 11.4961\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.08365310821181889, 2: 0.1532278973295307, 3: 0.5669943441960679}\n",
      "Validation Loss: 379.5123 - Validation Accuracy: 0.5783\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 11.3556\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06964696853415196, 2: 0.08918848846253564, 3: 0.6667115539994614}\n",
      "Validation Loss: 366.7875 - Validation Accuracy: 0.5829\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 10.9496\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.08947301100025581, 2: 0.11744879439979258, 3: 0.6044303797468354}\n",
      "Validation Loss: 369.3853 - Validation Accuracy: 0.5837\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 10.4006\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.08537989255564082, 2: 0.11148561057816957, 3: 0.6521007271747913}\n",
      "Validation Loss: 373.9603 - Validation Accuracy: 0.5826\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n",
      "Epoch 41/60 - Train Loss: 10.3357\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.11160143259145562, 2: 0.1724137931034483, 3: 0.5749394021007271}\n",
      "Validation Loss: 372.9257 - Validation Accuracy: 0.5823\n",
      "---------------\n",
      "Epoch 42/60\n",
      "---------------\n",
      "Epoch 42/60 - Train Loss: 10.2895\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.04144282425172678, 2: 0.1161524500907441, 3: 0.7230002693239967}\n",
      "Validation Loss: 367.8640 - Validation Accuracy: 0.5883\n",
      "---------------\n",
      "Epoch 43/60\n",
      "---------------\n",
      "Epoch 43/60 - Train Loss: 10.1781\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.05359426963417754, 2: 0.1236712470832253, 3: 0.7143819014274172}\n",
      "Validation Loss: 369.8447 - Validation Accuracy: 0.5858\n",
      "---------------\n",
      "Epoch 44/60\n",
      "---------------\n",
      "Epoch 44/60 - Train Loss: 10.4999\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.07143770785367101, 2: 0.1717008037334716, 3: 0.6878534877457582}\n",
      "Validation Loss: 376.6871 - Validation Accuracy: 0.5848\n",
      "---------------\n",
      "Epoch 45/60\n",
      "---------------\n",
      "Epoch 45/60 - Train Loss: 9.8393\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.12343310309542083, 2: 0.15841327456572465, 3: 0.6624697010503636}\n",
      "Validation Loss: 384.1350 - Validation Accuracy: 0.5811\n",
      "---------------\n",
      "Epoch 46/60\n",
      "---------------\n",
      "Epoch 46/60 - Train Loss: 10.4174\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.13213097979022767, 2: 0.11926367643246046, 3: 0.7050902235389174}\n",
      "Validation Loss: 370.5767 - Validation Accuracy: 0.5846\n",
      "---------------\n",
      "Epoch 47/60\n",
      "---------------\n",
      "Epoch 47/60 - Train Loss: 10.6349\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.07943207981580967, 2: 0.1457091003370495, 3: 0.6119041206571506}\n",
      "Validation Loss: 372.8759 - Validation Accuracy: 0.5834\n",
      "---------------\n",
      "Epoch 48/60\n",
      "---------------\n",
      "Epoch 48/60 - Train Loss: 10.9832\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.07975185469429522, 2: 0.14467202488981074, 3: 0.7552518179369782}\n",
      "Validation Loss: 377.9293 - Validation Accuracy: 0.5802\n",
      "---------------\n",
      "Epoch 49/60\n",
      "---------------\n",
      "Epoch 49/60 - Train Loss: 9.4995\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03357636224098235, 2: 0.08141042260824476, 3: 0.836520333961756}\n",
      "Validation Loss: 375.6974 - Validation Accuracy: 0.5846\n",
      "---------------\n",
      "Epoch 50/60\n",
      "---------------\n",
      "Epoch 50/60 - Train Loss: 9.5458\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.061524686620619086, 2: 0.12185636505055743, 3: 0.7124966334500404}\n",
      "Validation Loss: 378.5359 - Validation Accuracy: 0.5829\n",
      "---------------\n",
      "Epoch 51/60\n",
      "---------------\n",
      "Epoch 51/60 - Train Loss: 10.3845\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06542594013814275, 2: 0.08711433756805807, 3: 0.7205763533530838}\n",
      "Validation Loss: 371.8078 - Validation Accuracy: 0.5860\n",
      "---------------\n",
      "Epoch 52/60\n",
      "---------------\n",
      "Epoch 52/60 - Train Loss: 9.3659\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.10136863647991813, 2: 0.0619652579725175, 3: 0.7461621330460544}\n",
      "Validation Loss: 378.1580 - Validation Accuracy: 0.5874\n",
      "---------------\n",
      "Epoch 53/60\n",
      "---------------\n",
      "Epoch 53/60 - Train Loss: 10.1373\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.1114735226400614, 2: 0.17215452424163857, 3: 0.5976972798276327}\n",
      "Validation Loss: 374.3974 - Validation Accuracy: 0.5829\n",
      "---------------\n",
      "Epoch 54/60\n",
      "---------------\n",
      "Epoch 54/60 - Train Loss: 9.6434\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06747249936045024, 2: 0.12600466683951259, 3: 0.651360086183679}\n",
      "Validation Loss: 373.8120 - Validation Accuracy: 0.5800\n",
      "---------------\n",
      "Epoch 55/60\n",
      "---------------\n",
      "Epoch 55/60 - Train Loss: 10.0478\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.05941417242261448, 2: 0.11589318122893441, 3: 0.7324266092108807}\n",
      "Validation Loss: 377.8020 - Validation Accuracy: 0.5828\n",
      "---------------\n",
      "Epoch 56/60\n",
      "---------------\n",
      "Epoch 56/60 - Train Loss: 9.5106\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.06964696853415196, 2: 0.11770806326160228, 3: 0.6842176137893886}\n",
      "Validation Loss: 377.6602 - Validation Accuracy: 0.5838\n",
      "---------------\n",
      "Epoch 57/60\n",
      "---------------\n",
      "Epoch 57/60 - Train Loss: 9.9718\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.05359426963417754, 2: 0.10163339382940109, 3: 0.7244815513062214}\n",
      "Validation Loss: 368.6793 - Validation Accuracy: 0.5911\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 58/60\n",
      "---------------\n",
      "Epoch 58/60 - Train Loss: 9.4516\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.117677155282681, 2: 0.21623023074928702, 3: 0.5610692162671694}\n",
      "Validation Loss: 366.4003 - Validation Accuracy: 0.5895\n",
      "---------------\n",
      "Epoch 59/60\n",
      "---------------\n",
      "Epoch 59/60 - Train Loss: 9.9669\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.09151957022256331, 2: 0.11848586984703137, 3: 0.6562079181255049}\n",
      "Validation Loss: 375.9587 - Validation Accuracy: 0.5829\n",
      "---------------\n",
      "Epoch 60/60\n",
      "---------------\n",
      "Epoch 60/60 - Train Loss: 8.7991\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.08787413660782808, 2: 0.13378273269380347, 3: 0.6771478588742257}\n",
      "Validation Loss: 371.5810 - Validation Accuracy: 0.5878\n",
      "Loss on test set: 373.1442\n",
      "Accuracy on test set: 0.5878\n",
      "Incremental ARC Classifier added, current len: 5\n",
      "Incremental Task Layer added, current len: 5\n",
      "tsk: 4\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0', 'incremental_layers.1', 'incremental_layers.2', 'incremental_layers.3', 'arc_classifiers.0.4', 'arc_classifiers.1.4', 'arc_classifiers.2.4', 'arc_classifiers.2.4']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "----Freezing incremental_layers.1.gate_generator.0.weight\n",
      "----Freezing incremental_layers.1.gate_generator.0.bias\n",
      "----Freezing incremental_layers.1.gate_generator.1.weight\n",
      "----Freezing incremental_layers.1.gate_generator.1.bias\n",
      "----Freezing incremental_layers.1.gate_generator.5.weight\n",
      "----Freezing incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "----Freezing incremental_layers.2.gate_generator.0.weight\n",
      "----Freezing incremental_layers.2.gate_generator.0.bias\n",
      "----Freezing incremental_layers.2.gate_generator.1.weight\n",
      "----Freezing incremental_layers.2.gate_generator.1.bias\n",
      "----Freezing incremental_layers.2.gate_generator.5.weight\n",
      "----Freezing incremental_layers.2.gate_generator.5.bias\n",
      "----Freezing incremental_layers.3.layer.0.weight\n",
      "----Freezing incremental_layers.3.layer.0.bias\n",
      "----Freezing incremental_layers.3.layer.1.weight\n",
      "----Freezing incremental_layers.3.layer.1.bias\n",
      "----Freezing incremental_layers.3.layer.3.weight\n",
      "----Freezing incremental_layers.3.layer.3.bias\n",
      "----Freezing incremental_layers.3.layer.4.weight\n",
      "----Freezing incremental_layers.3.layer.4.bias\n",
      "----Freezing incremental_layers.3.layer.6.weight\n",
      "----Freezing incremental_layers.3.layer.6.bias\n",
      "----Freezing incremental_layers.3.layer.7.weight\n",
      "----Freezing incremental_layers.3.layer.7.bias\n",
      "----Freezing incremental_layers.3.gate_generator.0.weight\n",
      "----Freezing incremental_layers.3.gate_generator.0.bias\n",
      "----Freezing incremental_layers.3.gate_generator.1.weight\n",
      "----Freezing incremental_layers.3.gate_generator.1.bias\n",
      "----Freezing incremental_layers.3.gate_generator.5.weight\n",
      "----Freezing incremental_layers.3.gate_generator.5.bias\n",
      "incremental_layers.4.layer.0.weight\n",
      "incremental_layers.4.layer.0.bias\n",
      "incremental_layers.4.layer.1.weight\n",
      "incremental_layers.4.layer.1.bias\n",
      "incremental_layers.4.layer.3.weight\n",
      "incremental_layers.4.layer.3.bias\n",
      "incremental_layers.4.layer.4.weight\n",
      "incremental_layers.4.layer.4.bias\n",
      "incremental_layers.4.layer.6.weight\n",
      "incremental_layers.4.layer.6.bias\n",
      "incremental_layers.4.layer.7.weight\n",
      "incremental_layers.4.layer.7.bias\n",
      "incremental_layers.4.gate_generator.0.weight\n",
      "incremental_layers.4.gate_generator.0.bias\n",
      "incremental_layers.4.gate_generator.1.weight\n",
      "incremental_layers.4.gate_generator.1.bias\n",
      "incremental_layers.4.gate_generator.5.weight\n",
      "incremental_layers.4.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "----Freezing arc_classifiers.2.4.weight\n",
      "----Freezing arc_classifiers.2.4.weight\n",
      "arc_classifiers.3.2.weight\n",
      "arc_classifiers.3.2.bias\n",
      "arc_classifiers.3.4.weight\n",
      "arc_classifiers.4.2.weight\n",
      "arc_classifiers.4.2.bias\n",
      "arc_classifiers.4.4.weight\n",
      "alpha_cls: [189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      "   7.59994224   7.59994224   7.59994224   7.59994224   7.59994224]\n",
      "alpha_tsk: [ 3.79998556 37.99855605 37.99855605 37.99855605  1.51999769]\n",
      "num_prev_task_classes: 65\n",
      "alpha_cls: [189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      "   7.59994224   7.59994224   7.59994224   7.59994224   7.59994224]\n",
      "alpha_tsk: [ 28.17574613 281.74782555 281.74782555 281.74782555  11.27032415]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e8078114538045dd9ba7fd4ef4f256e3",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1800 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n",
      "Epoch 1/60 - Train Loss: 31.6933\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.006011767715528268, 2: 0.00622245268343272, 3: 0.2653514678157824, 4: 0.9416088765603329}\n",
      "Validation Loss: 401.8199 - Validation Accuracy: 0.5261\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 25.3254\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.007994371962138654, 2: 0.018148820326678767, 3: 0.20737947751144628, 4: 0.9407766990291262}\n",
      "Validation Loss: 405.3772 - Validation Accuracy: 0.5316\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 23.4331\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.001918649270913277, 2: 0.010370754472387866, 3: 0.245219499057366, 4: 0.9513176144244105}\n",
      "Validation Loss: 401.2739 - Validation Accuracy: 0.5359\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 22.2256\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.002074150894477573, 3: 0.3168596821976838, 4: 0.9332177531206658}\n",
      "Validation Loss: 416.1579 - Validation Accuracy: 0.5306\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 20.9435\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.002074150894477573, 3: 0.2653514678157824, 4: 0.9596393897364771}\n",
      "Validation Loss: 416.5600 - Validation Accuracy: 0.5307\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 20.6727\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0020465592223074956, 2: 0.002074150894477573, 3: 0.31524373821707513, 4: 0.9490984743411928}\n",
      "Validation Loss: 411.3013 - Validation Accuracy: 0.5394\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 19.9195\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.002074150894477573, 3: 0.2512119579854565, 4: 0.9700416088765603}\n",
      "Validation Loss: 407.6883 - Validation Accuracy: 0.5476\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 18.8654\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.00622245268343272, 3: 0.29793967142472394, 4: 0.9581137309292649}\n",
      "Validation Loss: 412.5151 - Validation Accuracy: 0.5383\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 20.2882\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.010104886160143258, 2: 0.00622245268343272, 3: 0.30776999730676, 4: 0.9366158113730929}\n",
      "Validation Loss: 414.7786 - Validation Accuracy: 0.5406\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 18.3091\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.008058326937835763, 2: 0.00622245268343272, 3: 0.22246162133046055, 4: 0.9447988904299583}\n",
      "Validation Loss: 405.1061 - Validation Accuracy: 0.5456\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 18.1342\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0020465592223074956, 2: 0.0, 3: 0.15371667115539994, 4: 0.9757975034674063}\n",
      "Validation Loss: 416.7528 - Validation Accuracy: 0.5383\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 17.6089\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.012023535431056536, 2: 0.0, 3: 0.1687988149744142, 4: 0.9790568654646324}\n",
      "Validation Loss: 403.6633 - Validation Accuracy: 0.5500\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 17.8981\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.004093118444614991, 2: 0.0, 3: 0.17048208995421493, 4: 0.9879334257975034}\n",
      "Validation Loss: 402.3935 - Validation Accuracy: 0.5501\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 17.3322\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.004093118444614991, 2: 0.008296603577910292, 3: 0.2988823054134123, 4: 0.9366158113730929}\n",
      "Validation Loss: 409.8766 - Validation Accuracy: 0.5419\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 18.9967\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.004029163468917882, 2: 0.008296603577910292, 3: 0.4708456773498519, 4: 0.7843273231622746}\n",
      "Validation Loss: 404.5206 - Validation Accuracy: 0.5509\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 18.0540\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0059631838216230235, 3: 0.30272017236735793, 4: 0.943619972260749}\n",
      "Validation Loss: 403.2741 - Validation Accuracy: 0.5499\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 18.8306\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.014070094653364031, 2: 0.0077780658542909, 3: 0.30655803932130354, 4: 0.918377253814147}\n",
      "Validation Loss: 404.4618 - Validation Accuracy: 0.5536\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 16.5598\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.004093118444614991, 2: 0.002074150894477573, 3: 0.33793428494478855, 4: 0.9192787794729542}\n",
      "Validation Loss: 401.5151 - Validation Accuracy: 0.5560\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 17.1597\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0020465592223074956, 2: 0.0, 3: 0.2868973875572314, 4: 0.9570735090152566}\n",
      "Validation Loss: 410.5171 - Validation Accuracy: 0.5474\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 17.1127\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.015988743924277307, 2: 0.00388903292714545, 3: 0.33456773498518716, 4: 0.9212898751733704}\n",
      "Validation Loss: 408.6605 - Validation Accuracy: 0.5459\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 16.5939\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.022000511639805577, 2: 0.00388903292714545, 3: 0.3084433072986803, 4: 0.9011789181692095}\n",
      "Validation Loss: 403.1010 - Validation Accuracy: 0.5517\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 16.4533\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.002074150894477573, 3: 0.30797199030433614, 4: 0.929126213592233}\n",
      "Validation Loss: 409.0181 - Validation Accuracy: 0.5484\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 17.0898\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.003965208493220773, 2: 0.002074150894477573, 3: 0.23229194721249663, 4: 0.9556865464632455}\n",
      "Validation Loss: 409.0144 - Validation Accuracy: 0.5496\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 15.7472\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.010104886160143258, 2: 0.005703914959813326, 3: 0.257204416913547, 4: 0.929126213592233}\n",
      "Validation Loss: 411.3347 - Validation Accuracy: 0.5427\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 17.9383\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.028012279355333843, 2: 0.002074150894477573, 3: 0.4148936170212766, 4: 0.84750346740638}\n",
      "Validation Loss: 416.0875 - Validation Accuracy: 0.5413\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 15.8450\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.014006139677666922, 2: 0.002074150894477573, 3: 0.29672771343926746, 4: 0.8787794729542302}\n",
      "Validation Loss: 411.5689 - Validation Accuracy: 0.5457\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 16.1238\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.04022767971348171, 2: 0.011926367643246047, 3: 0.312550498249394, 4: 0.8929958391123439}\n",
      "Validation Loss: 418.0548 - Validation Accuracy: 0.5421\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 15.9182\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0061396776669224865, 2: 0.008296603577910292, 3: 0.3765149474818206, 4: 0.8795423023578364}\n",
      "Validation Loss: 417.1403 - Validation Accuracy: 0.5437\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 16.1420\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.005703914959813326, 3: 0.42418529490977647, 4: 0.823509015256588}\n",
      "Validation Loss: 415.4594 - Validation Accuracy: 0.5456\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 16.3167\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0077780658542909, 3: 0.3487072448155131, 4: 0.898127600554785}\n",
      "Validation Loss: 414.4744 - Validation Accuracy: 0.5410\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 16.7885\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.010104886160143258, 2: 0.009852216748768473, 3: 0.3250067330999192, 4: 0.8785714285714286}\n",
      "Validation Loss: 412.1518 - Validation Accuracy: 0.5453\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 16.2329\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.016180608851368638, 2: 0.01607466943220119, 3: 0.4895636951252357, 4: 0.7577669902912622}\n",
      "Validation Loss: 404.9141 - Validation Accuracy: 0.5514\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 16.1406\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.008122281913532873, 2: 0.00622245268343272, 3: 0.35375706975491517, 4: 0.9175450762829404}\n",
      "Validation Loss: 401.9966 - Validation Accuracy: 0.5499\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 15.0690\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.022256331542594012, 2: 0.014000518537723619, 3: 0.36715593859412876, 4: 0.8590846047156727}\n",
      "Validation Loss: 406.8492 - Validation Accuracy: 0.5504\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 15.4460\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.034024047070862116, 2: 0.0059631838216230235, 3: 0.367896579585241, 4: 0.8698335644937587}\n",
      "Validation Loss: 410.7703 - Validation Accuracy: 0.5477\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 15.9447\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03607060629316961, 2: 0.01788955146486907, 3: 0.35469970374360354, 4: 0.8558945908460471}\n",
      "Validation Loss: 410.1724 - Validation Accuracy: 0.5509\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 14.6201\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.02193655666410847, 2: 0.009852216748768473, 3: 0.35948020468623754, 4: 0.8715672676837726}\n",
      "Validation Loss: 414.8730 - Validation Accuracy: 0.5441\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 15.1473\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.01004093118444615, 2: 0.009852216748768473, 3: 0.403851333153784, 4: 0.8650485436893204}\n",
      "Validation Loss: 414.5535 - Validation Accuracy: 0.5436\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 15.0029\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.032041442824251726, 2: 0.011926367643246047, 3: 0.35039051979531377, 4: 0.9047850208044382}\n",
      "Validation Loss: 408.9218 - Validation Accuracy: 0.5486\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 15.7736\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.02411102583781018, 2: 0.011926367643246047, 3: 0.3573256127120926, 4: 0.9086685159500694}\n",
      "Validation Loss: 408.3751 - Validation Accuracy: 0.5491\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n",
      "Epoch 41/60 - Train Loss: 15.3211\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.02001790739319519, 2: 0.0077780658542909, 3: 0.3283732830595206, 4: 0.9081830790568655}\n",
      "Validation Loss: 410.5777 - Validation Accuracy: 0.5466\n",
      "---------------\n",
      "Epoch 42/60\n",
      "---------------\n",
      "Epoch 42/60 - Train Loss: 15.9529\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.02602967510872346, 2: 0.009852216748768473, 3: 0.37294640452464317, 4: 0.8720527045769764}\n",
      "Validation Loss: 407.3405 - Validation Accuracy: 0.5500\n",
      "---------------\n",
      "Epoch 43/60\n",
      "---------------\n",
      "Epoch 43/60 - Train Loss: 15.0856\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.026093630084420567, 2: 0.011926367643246047, 3: 0.295785079450579, 4: 0.933009708737864}\n",
      "Validation Loss: 409.0363 - Validation Accuracy: 0.5481\n",
      "---------------\n",
      "Epoch 44/60\n",
      "---------------\n",
      "Epoch 44/60 - Train Loss: 15.4722\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0059631838216230235, 3: 0.37079181255049826, 4: 0.871497919556172}\n",
      "Validation Loss: 410.5760 - Validation Accuracy: 0.5487\n",
      "---------------\n",
      "Epoch 45/60\n",
      "---------------\n",
      "Epoch 45/60 - Train Loss: 15.5260\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.04611153747761576, 2: 0.02022297122115634, 3: 0.3908564503097226, 4: 0.7834257975034674}\n",
      "Validation Loss: 407.5374 - Validation Accuracy: 0.5511\n",
      "---------------\n",
      "Epoch 46/60\n",
      "---------------\n",
      "Epoch 46/60 - Train Loss: 14.6703\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.014134049629061141, 2: 0.01607466943220119, 3: 0.3969162402370051, 4: 0.8278779472954231}\n",
      "Validation Loss: 408.2112 - Validation Accuracy: 0.5536\n",
      "---------------\n",
      "Epoch 47/60\n",
      "---------------\n",
      "Epoch 47/60 - Train Loss: 16.0725\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03632642619595804, 2: 0.014000518537723619, 3: 0.38755723134931325, 4: 0.8355755894590846}\n",
      "Validation Loss: 411.7226 - Validation Accuracy: 0.5501\n",
      "---------------\n",
      "Epoch 48/60\n",
      "---------------\n",
      "Epoch 48/60 - Train Loss: 16.1889\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03223330775134305, 2: 0.01011148561057817, 3: 0.3477646108268247, 4: 0.9006934812760056}\n",
      "Validation Loss: 404.8866 - Validation Accuracy: 0.5439\n",
      "---------------\n",
      "Epoch 49/60\n",
      "---------------\n",
      "Epoch 49/60 - Train Loss: 14.8079\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.05800716295727808, 2: 0.014000518537723619, 3: 0.44431726366819285, 4: 0.8036754507628294}\n",
      "Validation Loss: 411.2372 - Validation Accuracy: 0.5436\n",
      "---------------\n",
      "Epoch 50/60\n",
      "---------------\n",
      "Epoch 50/60 - Train Loss: 14.8543\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.024047070862113072, 2: 0.011926367643246047, 3: 0.4060059251279289, 4: 0.8603328710124827}\n",
      "Validation Loss: 409.8006 - Validation Accuracy: 0.5514\n",
      "---------------\n",
      "Epoch 51/60\n",
      "---------------\n",
      "Epoch 51/60 - Train Loss: 15.3986\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.022128421591199796, 2: 0.005703914959813326, 3: 0.36500134661998385, 4: 0.9095700416088766}\n",
      "Validation Loss: 411.0348 - Validation Accuracy: 0.5471\n",
      "---------------\n",
      "Epoch 52/60\n",
      "---------------\n",
      "Epoch 52/60 - Train Loss: 14.2664\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.026093630084420567, 2: 0.0059631838216230235, 3: 0.43785348774575816, 4: 0.8325242718446602}\n",
      "Validation Loss: 409.4630 - Validation Accuracy: 0.5467\n",
      "---------------\n",
      "Epoch 53/60\n",
      "---------------\n",
      "Epoch 53/60 - Train Loss: 14.2040\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.026093630084420567, 2: 0.014000518537723619, 3: 0.5190546727713439, 4: 0.7226074895977809}\n",
      "Validation Loss: 410.1779 - Validation Accuracy: 0.5483\n",
      "---------------\n",
      "Epoch 54/60\n",
      "---------------\n",
      "Epoch 54/60 - Train Loss: 14.5536\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03012279355333845, 2: 0.008296603577910292, 3: 0.4112577430649071, 4: 0.84375866851595}\n",
      "Validation Loss: 410.6345 - Validation Accuracy: 0.5527\n",
      "---------------\n",
      "Epoch 55/60\n",
      "---------------\n",
      "Epoch 55/60 - Train Loss: 14.7613\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0242389357892044, 2: 0.014000518537723619, 3: 0.3712631295448424, 4: 0.860124826629681}\n",
      "Validation Loss: 410.2276 - Validation Accuracy: 0.5487\n",
      "---------------\n",
      "Epoch 56/60\n",
      "---------------\n",
      "Epoch 56/60 - Train Loss: 15.3966\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0401637247377846, 2: 0.014000518537723619, 3: 0.3872879073525451, 4: 0.8504160887656034}\n",
      "Validation Loss: 412.1433 - Validation Accuracy: 0.5444\n",
      "---------------\n",
      "Epoch 57/60\n",
      "---------------\n",
      "Epoch 57/60 - Train Loss: 14.3476\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03818112049117421, 2: 0.02022297122115634, 3: 0.42081874495017507, 4: 0.7993065187239945}\n",
      "Validation Loss: 409.9284 - Validation Accuracy: 0.5536\n",
      "---------------\n",
      "Epoch 58/60\n",
      "---------------\n",
      "Epoch 58/60 - Train Loss: 14.6784\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03216935277564595, 2: 0.010370754472387866, 3: 0.3211688661459736, 4: 0.9250346740638002}\n",
      "Validation Loss: 416.7715 - Validation Accuracy: 0.5440\n",
      "---------------\n",
      "Epoch 59/60\n",
      "---------------\n",
      "Epoch 59/60 - Train Loss: 15.0704\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.03619851624456383, 2: 0.016333938294010888, 3: 0.5373013735523835, 4: 0.6798890429958391}\n",
      "Validation Loss: 404.8223 - Validation Accuracy: 0.5527\n",
      "---------------\n",
      "Epoch 60/60\n",
      "---------------\n",
      "Epoch 60/60 - Train Loss: 15.2097\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.022064466615502685, 2: 0.01011148561057817, 3: 0.4117290600592513, 4: 0.8585298196948682}\n",
      "Validation Loss: 410.0385 - Validation Accuracy: 0.5466\n",
      "Loss on test set: 412.3287\n",
      "Accuracy on test set: 0.5466\n",
      "Incremental ARC Classifier added, current len: 6\n",
      "Incremental Task Layer added, current len: 6\n",
      "tsk: 5\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0', 'incremental_layers.1', 'incremental_layers.2', 'incremental_layers.3', 'incremental_layers.4', 'arc_classifiers.0.4', 'arc_classifiers.1.4', 'arc_classifiers.2.4', 'arc_classifiers.3.4', 'arc_classifiers.3.4']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "----Freezing incremental_layers.1.gate_generator.0.weight\n",
      "----Freezing incremental_layers.1.gate_generator.0.bias\n",
      "----Freezing incremental_layers.1.gate_generator.1.weight\n",
      "----Freezing incremental_layers.1.gate_generator.1.bias\n",
      "----Freezing incremental_layers.1.gate_generator.5.weight\n",
      "----Freezing incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "----Freezing incremental_layers.2.gate_generator.0.weight\n",
      "----Freezing incremental_layers.2.gate_generator.0.bias\n",
      "----Freezing incremental_layers.2.gate_generator.1.weight\n",
      "----Freezing incremental_layers.2.gate_generator.1.bias\n",
      "----Freezing incremental_layers.2.gate_generator.5.weight\n",
      "----Freezing incremental_layers.2.gate_generator.5.bias\n",
      "----Freezing incremental_layers.3.layer.0.weight\n",
      "----Freezing incremental_layers.3.layer.0.bias\n",
      "----Freezing incremental_layers.3.layer.1.weight\n",
      "----Freezing incremental_layers.3.layer.1.bias\n",
      "----Freezing incremental_layers.3.layer.3.weight\n",
      "----Freezing incremental_layers.3.layer.3.bias\n",
      "----Freezing incremental_layers.3.layer.4.weight\n",
      "----Freezing incremental_layers.3.layer.4.bias\n",
      "----Freezing incremental_layers.3.layer.6.weight\n",
      "----Freezing incremental_layers.3.layer.6.bias\n",
      "----Freezing incremental_layers.3.layer.7.weight\n",
      "----Freezing incremental_layers.3.layer.7.bias\n",
      "----Freezing incremental_layers.3.gate_generator.0.weight\n",
      "----Freezing incremental_layers.3.gate_generator.0.bias\n",
      "----Freezing incremental_layers.3.gate_generator.1.weight\n",
      "----Freezing incremental_layers.3.gate_generator.1.bias\n",
      "----Freezing incremental_layers.3.gate_generator.5.weight\n",
      "----Freezing incremental_layers.3.gate_generator.5.bias\n",
      "----Freezing incremental_layers.4.layer.0.weight\n",
      "----Freezing incremental_layers.4.layer.0.bias\n",
      "----Freezing incremental_layers.4.layer.1.weight\n",
      "----Freezing incremental_layers.4.layer.1.bias\n",
      "----Freezing incremental_layers.4.layer.3.weight\n",
      "----Freezing incremental_layers.4.layer.3.bias\n",
      "----Freezing incremental_layers.4.layer.4.weight\n",
      "----Freezing incremental_layers.4.layer.4.bias\n",
      "----Freezing incremental_layers.4.layer.6.weight\n",
      "----Freezing incremental_layers.4.layer.6.bias\n",
      "----Freezing incremental_layers.4.layer.7.weight\n",
      "----Freezing incremental_layers.4.layer.7.bias\n",
      "----Freezing incremental_layers.4.gate_generator.0.weight\n",
      "----Freezing incremental_layers.4.gate_generator.0.bias\n",
      "----Freezing incremental_layers.4.gate_generator.1.weight\n",
      "----Freezing incremental_layers.4.gate_generator.1.bias\n",
      "----Freezing incremental_layers.4.gate_generator.5.weight\n",
      "----Freezing incremental_layers.4.gate_generator.5.bias\n",
      "incremental_layers.5.layer.0.weight\n",
      "incremental_layers.5.layer.0.bias\n",
      "incremental_layers.5.layer.1.weight\n",
      "incremental_layers.5.layer.1.bias\n",
      "incremental_layers.5.layer.3.weight\n",
      "incremental_layers.5.layer.3.bias\n",
      "incremental_layers.5.layer.4.weight\n",
      "incremental_layers.5.layer.4.bias\n",
      "incremental_layers.5.layer.6.weight\n",
      "incremental_layers.5.layer.6.bias\n",
      "incremental_layers.5.layer.7.weight\n",
      "incremental_layers.5.layer.7.bias\n",
      "incremental_layers.5.gate_generator.0.weight\n",
      "incremental_layers.5.gate_generator.0.bias\n",
      "incremental_layers.5.gate_generator.1.weight\n",
      "incremental_layers.5.gate_generator.1.bias\n",
      "incremental_layers.5.gate_generator.5.weight\n",
      "incremental_layers.5.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "----Freezing arc_classifiers.2.4.weight\n",
      "arc_classifiers.3.2.weight\n",
      "arc_classifiers.3.2.bias\n",
      "----Freezing arc_classifiers.3.4.weight\n",
      "----Freezing arc_classifiers.3.4.weight\n",
      "arc_classifiers.4.2.weight\n",
      "arc_classifiers.4.2.bias\n",
      "arc_classifiers.4.4.weight\n",
      "arc_classifiers.5.2.weight\n",
      "arc_classifiers.5.2.bias\n",
      "arc_classifiers.5.4.weight\n",
      "alpha_cls: [194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      "   7.79993916   7.79993916   7.79993916   7.79993916   7.79993916]\n",
      "alpha_tsk: [ 3.89998479 38.99847906 38.99847906 38.99847906 38.99847906  1.55999757]\n",
      "num_prev_task_classes: 70\n",
      "alpha_cls: [194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      " 194.96198241 194.96198241 194.96198241 194.96198241 194.96198241\n",
      "   7.79993916   7.79993916   7.79993916   7.79993916   7.79993916]\n",
      "alpha_tsk: [ 26.44792715 264.4699886  264.4699886  264.4699886  264.4699886\n",
      "  10.57919561]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3979f4ace9204ae09eaa5718dfb40e8e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1860 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n",
      "Epoch 1/60 - Train Loss: 50.1846\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0020465592223074956, 2: 0.0, 3: 0.0021545919741448962, 4: 0.2326629680998613, 5: 0.9811774695726012}\n",
      "Validation Loss: 423.2876 - Validation Accuracy: 0.5135\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 37.9374\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.023717059639389738, 5: 1.0}\n",
      "Validation Loss: 433.2133 - Validation Accuracy: 0.5043\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 32.5604\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.017753120665742025, 5: 1.0}\n",
      "Validation Loss: 423.2653 - Validation Accuracy: 0.5089\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 31.7311\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.05048543689320388, 5: 0.9977356354373054}\n",
      "Validation Loss: 425.0698 - Validation Accuracy: 0.5144\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 29.5820\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0020465592223074956, 2: 0.0, 3: 0.0021545919741448962, 4: 0.06678224687933426, 5: 0.9857061986979904}\n",
      "Validation Loss: 432.8174 - Validation Accuracy: 0.5144\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 28.8955\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0645631067961165, 5: 1.0}\n",
      "Validation Loss: 428.1144 - Validation Accuracy: 0.5165\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 29.8567\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0020465592223074956, 2: 0.0, 3: 0.0038378669539455967, 4: 0.2007628294036061, 5: 0.9796207189357486}\n",
      "Validation Loss: 430.5458 - Validation Accuracy: 0.5156\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 28.5545\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0021545919741448962, 4: 0.18730929264909849, 5: 0.9782054910840645}\n",
      "Validation Loss: 427.0268 - Validation Accuracy: 0.5161\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 27.8786\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.002074150894477573, 3: 0.006463775922434689, 4: 0.15693481276005547, 5: 0.9811774695726012}\n",
      "Validation Loss: 429.3439 - Validation Accuracy: 0.5140\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 27.2884\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.005963938973647711, 5: 1.0}\n",
      "Validation Loss: 425.7491 - Validation Accuracy: 0.5228\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 26.7420\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.9977356354373054}\n",
      "Validation Loss: 425.5394 - Validation Accuracy: 0.5212\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 26.9313\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0021545919741448962, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 437.6680 - Validation Accuracy: 0.5145\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 25.7651\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0016832749798007002, 4: 0.0004854368932038835, 5: 0.9941268044155109}\n",
      "Validation Loss: 427.5757 - Validation Accuracy: 0.5207\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 26.0704\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.005049824939402101, 4: 0.010402219140083218, 5: 0.9986555335409001}\n",
      "Validation Loss: 431.2338 - Validation Accuracy: 0.5136\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 26.4123\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.002074150894477573, 3: 0.01629410180447078, 4: 0.16497919556171983, 5: 0.964619303707897}\n",
      "Validation Loss: 428.2369 - Validation Accuracy: 0.5235\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 25.1340\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0059924589280904925, 4: 0.008183079056865465, 5: 0.9924992923860742}\n",
      "Validation Loss: 424.4226 - Validation Accuracy: 0.5229\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 24.0252\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0015256588072122053, 5: 1.0}\n",
      "Validation Loss: 426.0221 - Validation Accuracy: 0.5216\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 24.9952\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.046185852981969484, 5: 0.9932069063119162}\n",
      "Validation Loss: 436.8587 - Validation Accuracy: 0.5203\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 25.1630\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 431.6805 - Validation Accuracy: 0.5191\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 23.2587\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.9986555335409001}\n",
      "Validation Loss: 435.8260 - Validation Accuracy: 0.5137\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 23.3560\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0059924589280904925, 4: 0.0, 5: 0.9986555335409001}\n",
      "Validation Loss: 428.5441 - Validation Accuracy: 0.5208\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 26.1921\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0059924589280904925, 4: 0.013106796116504855, 5: 0.9977356354373054}\n",
      "Validation Loss: 428.0420 - Validation Accuracy: 0.5167\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 24.9667\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 433.6626 - Validation Accuracy: 0.5172\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 24.4062\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 436.7637 - Validation Accuracy: 0.5172\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 23.0379\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.9970280215114633}\n",
      "Validation Loss: 429.1732 - Validation Accuracy: 0.5199\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 24.2732\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 423.0487 - Validation Accuracy: 0.5235\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 23.3046\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 433.0836 - Validation Accuracy: 0.5185\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 23.2321\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 439.6683 - Validation Accuracy: 0.5173\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 23.4792\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.008183079056865465, 5: 1.0}\n",
      "Validation Loss: 435.9685 - Validation Accuracy: 0.5189\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 24.2064\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 430.4632 - Validation Accuracy: 0.5188\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 23.8950\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.003744798890429958, 5: 1.0}\n",
      "Validation Loss: 439.7507 - Validation Accuracy: 0.5141\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 22.1469\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 1.0}\n",
      "Validation Loss: 427.8658 - Validation Accuracy: 0.5223\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 22.1263\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 428.1954 - Validation Accuracy: 0.5219\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 23.2964\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 430.1465 - Validation Accuracy: 0.5228\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 23.3688\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.15610263522884882, 5: 0.9635578828191339}\n",
      "Validation Loss: 427.3974 - Validation Accuracy: 0.5237\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 21.9058\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 1.0}\n",
      "Validation Loss: 430.5256 - Validation Accuracy: 0.5203\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 21.9145\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 434.0872 - Validation Accuracy: 0.5196\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 21.4255\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 430.5657 - Validation Accuracy: 0.5185\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 22.0084\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 439.6035 - Validation Accuracy: 0.5161\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 22.0910\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 438.8610 - Validation Accuracy: 0.5139\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n",
      "Epoch 41/60 - Train Loss: 22.1859\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0021545919741448962, 4: 0.026768377253814146, 5: 0.9934191904896689}\n",
      "Validation Loss: 440.2637 - Validation Accuracy: 0.5209\n",
      "---------------\n",
      "Epoch 42/60\n",
      "---------------\n",
      "Epoch 42/60 - Train Loss: 22.1442\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 425.8037 - Validation Accuracy: 0.5237\n",
      "---------------\n",
      "Epoch 43/60\n",
      "---------------\n",
      "Epoch 43/60 - Train Loss: 20.8478\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.9977356354373054}\n",
      "Validation Loss: 439.9472 - Validation Accuracy: 0.5211\n",
      "---------------\n",
      "Epoch 44/60\n",
      "---------------\n",
      "Epoch 44/60 - Train Loss: 21.0048\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 440.8186 - Validation Accuracy: 0.5147\n",
      "---------------\n",
      "Epoch 45/60\n",
      "---------------\n",
      "Epoch 45/60 - Train Loss: 21.8084\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.9977356354373054}\n",
      "Validation Loss: 440.4183 - Validation Accuracy: 0.5179\n",
      "---------------\n",
      "Epoch 46/60\n",
      "---------------\n",
      "Epoch 46/60 - Train Loss: 21.1048\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 436.7823 - Validation Accuracy: 0.5220\n",
      "---------------\n",
      "Epoch 47/60\n",
      "---------------\n",
      "Epoch 47/60 - Train Loss: 21.0546\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.9977356354373054}\n",
      "Validation Loss: 432.3280 - Validation Accuracy: 0.5200\n",
      "---------------\n",
      "Epoch 48/60\n",
      "---------------\n",
      "Epoch 48/60 - Train Loss: 20.9714\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 437.6830 - Validation Accuracy: 0.5189\n",
      "---------------\n",
      "Epoch 49/60\n",
      "---------------\n",
      "Epoch 49/60 - Train Loss: 21.1893\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.024410540915395285, 5: 0.9963911689782055}\n",
      "Validation Loss: 432.6163 - Validation Accuracy: 0.5196\n",
      "---------------\n",
      "Epoch 50/60\n",
      "---------------\n",
      "Epoch 50/60 - Train Loss: 22.1804\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.9954712708746108}\n",
      "Validation Loss: 433.5480 - Validation Accuracy: 0.5148\n",
      "---------------\n",
      "Epoch 51/60\n",
      "---------------\n",
      "Epoch 51/60 - Train Loss: 21.7495\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0846740638002774, 5: 0.95810925559015}\n",
      "Validation Loss: 436.9093 - Validation Accuracy: 0.5176\n",
      "---------------\n",
      "Epoch 52/60\n",
      "---------------\n",
      "Epoch 52/60 - Train Loss: 21.1039\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 424.9755 - Validation Accuracy: 0.5260\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 53/60\n",
      "---------------\n",
      "Epoch 53/60 - Train Loss: 21.1447\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 440.9132 - Validation Accuracy: 0.5181\n",
      "---------------\n",
      "Epoch 54/60\n",
      "---------------\n",
      "Epoch 54/60 - Train Loss: 21.6605\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 443.3134 - Validation Accuracy: 0.5165\n",
      "---------------\n",
      "Epoch 55/60\n",
      "---------------\n",
      "Epoch 55/60 - Train Loss: 20.0699\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 437.9360 - Validation Accuracy: 0.5183\n",
      "---------------\n",
      "Epoch 56/60\n",
      "---------------\n",
      "Epoch 56/60 - Train Loss: 21.8096\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 436.8074 - Validation Accuracy: 0.5169\n",
      "---------------\n",
      "Epoch 57/60\n",
      "---------------\n",
      "Epoch 57/60 - Train Loss: 22.1779\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 430.0021 - Validation Accuracy: 0.5221\n",
      "---------------\n",
      "Epoch 58/60\n",
      "---------------\n",
      "Epoch 58/60 - Train Loss: 20.9220\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}\n",
      "Validation Loss: 435.6702 - Validation Accuracy: 0.5259\n",
      "---------------\n",
      "Epoch 59/60\n",
      "---------------\n",
      "Epoch 59/60 - Train Loss: 20.9088\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.14466019417475728, 5: 0.9843617322388905}\n",
      "Validation Loss: 431.1470 - Validation Accuracy: 0.5255\n",
      "---------------\n",
      "Epoch 60/60\n",
      "---------------\n",
      "Epoch 60/60 - Train Loss: 21.2431\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.01823855755894591, 5: 1.0}\n",
      "Validation Loss: 431.0842 - Validation Accuracy: 0.5236\n",
      "Loss on test set: 433.9913\n",
      "Accuracy on test set: 0.5236\n",
      "Incremental ARC Classifier added, current len: 7\n",
      "Incremental Task Layer added, current len: 7\n",
      "tsk: 6\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0', 'incremental_layers.1', 'incremental_layers.2', 'incremental_layers.3', 'incremental_layers.4', 'incremental_layers.5', 'arc_classifiers.0.4', 'arc_classifiers.1.4', 'arc_classifiers.2.4', 'arc_classifiers.3.4', 'arc_classifiers.4.4', 'arc_classifiers.4.4']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "----Freezing incremental_layers.1.gate_generator.0.weight\n",
      "----Freezing incremental_layers.1.gate_generator.0.bias\n",
      "----Freezing incremental_layers.1.gate_generator.1.weight\n",
      "----Freezing incremental_layers.1.gate_generator.1.bias\n",
      "----Freezing incremental_layers.1.gate_generator.5.weight\n",
      "----Freezing incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "----Freezing incremental_layers.2.gate_generator.0.weight\n",
      "----Freezing incremental_layers.2.gate_generator.0.bias\n",
      "----Freezing incremental_layers.2.gate_generator.1.weight\n",
      "----Freezing incremental_layers.2.gate_generator.1.bias\n",
      "----Freezing incremental_layers.2.gate_generator.5.weight\n",
      "----Freezing incremental_layers.2.gate_generator.5.bias\n",
      "----Freezing incremental_layers.3.layer.0.weight\n",
      "----Freezing incremental_layers.3.layer.0.bias\n",
      "----Freezing incremental_layers.3.layer.1.weight\n",
      "----Freezing incremental_layers.3.layer.1.bias\n",
      "----Freezing incremental_layers.3.layer.3.weight\n",
      "----Freezing incremental_layers.3.layer.3.bias\n",
      "----Freezing incremental_layers.3.layer.4.weight\n",
      "----Freezing incremental_layers.3.layer.4.bias\n",
      "----Freezing incremental_layers.3.layer.6.weight\n",
      "----Freezing incremental_layers.3.layer.6.bias\n",
      "----Freezing incremental_layers.3.layer.7.weight\n",
      "----Freezing incremental_layers.3.layer.7.bias\n",
      "----Freezing incremental_layers.3.gate_generator.0.weight\n",
      "----Freezing incremental_layers.3.gate_generator.0.bias\n",
      "----Freezing incremental_layers.3.gate_generator.1.weight\n",
      "----Freezing incremental_layers.3.gate_generator.1.bias\n",
      "----Freezing incremental_layers.3.gate_generator.5.weight\n",
      "----Freezing incremental_layers.3.gate_generator.5.bias\n",
      "----Freezing incremental_layers.4.layer.0.weight\n",
      "----Freezing incremental_layers.4.layer.0.bias\n",
      "----Freezing incremental_layers.4.layer.1.weight\n",
      "----Freezing incremental_layers.4.layer.1.bias\n",
      "----Freezing incremental_layers.4.layer.3.weight\n",
      "----Freezing incremental_layers.4.layer.3.bias\n",
      "----Freezing incremental_layers.4.layer.4.weight\n",
      "----Freezing incremental_layers.4.layer.4.bias\n",
      "----Freezing incremental_layers.4.layer.6.weight\n",
      "----Freezing incremental_layers.4.layer.6.bias\n",
      "----Freezing incremental_layers.4.layer.7.weight\n",
      "----Freezing incremental_layers.4.layer.7.bias\n",
      "----Freezing incremental_layers.4.gate_generator.0.weight\n",
      "----Freezing incremental_layers.4.gate_generator.0.bias\n",
      "----Freezing incremental_layers.4.gate_generator.1.weight\n",
      "----Freezing incremental_layers.4.gate_generator.1.bias\n",
      "----Freezing incremental_layers.4.gate_generator.5.weight\n",
      "----Freezing incremental_layers.4.gate_generator.5.bias\n",
      "----Freezing incremental_layers.5.layer.0.weight\n",
      "----Freezing incremental_layers.5.layer.0.bias\n",
      "----Freezing incremental_layers.5.layer.1.weight\n",
      "----Freezing incremental_layers.5.layer.1.bias\n",
      "----Freezing incremental_layers.5.layer.3.weight\n",
      "----Freezing incremental_layers.5.layer.3.bias\n",
      "----Freezing incremental_layers.5.layer.4.weight\n",
      "----Freezing incremental_layers.5.layer.4.bias\n",
      "----Freezing incremental_layers.5.layer.6.weight\n",
      "----Freezing incremental_layers.5.layer.6.bias\n",
      "----Freezing incremental_layers.5.layer.7.weight\n",
      "----Freezing incremental_layers.5.layer.7.bias\n",
      "----Freezing incremental_layers.5.gate_generator.0.weight\n",
      "----Freezing incremental_layers.5.gate_generator.0.bias\n",
      "----Freezing incremental_layers.5.gate_generator.1.weight\n",
      "----Freezing incremental_layers.5.gate_generator.1.bias\n",
      "----Freezing incremental_layers.5.gate_generator.5.weight\n",
      "----Freezing incremental_layers.5.gate_generator.5.bias\n",
      "incremental_layers.6.layer.0.weight\n",
      "incremental_layers.6.layer.0.bias\n",
      "incremental_layers.6.layer.1.weight\n",
      "incremental_layers.6.layer.1.bias\n",
      "incremental_layers.6.layer.3.weight\n",
      "incremental_layers.6.layer.3.bias\n",
      "incremental_layers.6.layer.4.weight\n",
      "incremental_layers.6.layer.4.bias\n",
      "incremental_layers.6.layer.6.weight\n",
      "incremental_layers.6.layer.6.bias\n",
      "incremental_layers.6.layer.7.weight\n",
      "incremental_layers.6.layer.7.bias\n",
      "incremental_layers.6.gate_generator.0.weight\n",
      "incremental_layers.6.gate_generator.0.bias\n",
      "incremental_layers.6.gate_generator.1.weight\n",
      "incremental_layers.6.gate_generator.1.bias\n",
      "incremental_layers.6.gate_generator.5.weight\n",
      "incremental_layers.6.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "----Freezing arc_classifiers.2.4.weight\n",
      "arc_classifiers.3.2.weight\n",
      "arc_classifiers.3.2.bias\n",
      "----Freezing arc_classifiers.3.4.weight\n",
      "arc_classifiers.4.2.weight\n",
      "arc_classifiers.4.2.bias\n",
      "----Freezing arc_classifiers.4.4.weight\n",
      "----Freezing arc_classifiers.4.4.weight\n",
      "arc_classifiers.5.2.weight\n",
      "arc_classifiers.5.2.bias\n",
      "arc_classifiers.5.4.weight\n",
      "arc_classifiers.6.2.weight\n",
      "arc_classifiers.6.2.bias\n",
      "arc_classifiers.6.4.weight\n",
      "alpha_cls: [199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008   7.999936   7.999936   7.999936\n",
      "   7.999936   7.999936]\n",
      "alpha_tsk: [ 3.999984   39.99840006 39.99840006 39.99840006 39.99840006 39.99840006\n",
      "  1.59999744]\n",
      "num_prev_task_classes: 75\n",
      "alpha_cls: [199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008 199.960008 199.960008 199.960008\n",
      " 199.960008 199.960008 199.960008   7.999936   7.999936   7.999936\n",
      "   7.999936   7.999936]\n",
      "alpha_tsk: [ 25.59890226 255.97980735 255.97980735 255.97980735 255.97980735\n",
      " 255.97980735  10.23958548]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "aa29d918b4cf414f92a09694cceea6bc",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1920 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n",
      "Epoch 1/60 - Train Loss: 53.2699\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.564534390036796, 6: 0.8720331712896768}\n",
      "Validation Loss: 485.9766 - Validation Accuracy: 0.4646\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 42.1062\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5808802717237476, 6: 0.8345010008578782}\n",
      "Validation Loss: 470.7712 - Validation Accuracy: 0.4803\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 40.7827\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.4266911972827625, 6: 0.8798255647698027}\n",
      "Validation Loss: 465.7998 - Validation Accuracy: 0.4835\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 37.0908\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.43610246249646195, 6: 0.8411495567629397}\n",
      "Validation Loss: 469.3320 - Validation Accuracy: 0.4835\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 35.1606\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.544650438720634, 6: 0.8285673434372319}\n",
      "Validation Loss: 467.2225 - Validation Accuracy: 0.4854\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 36.4648\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.4941975658080951, 6: 0.8599513869030597}\n",
      "Validation Loss: 478.4756 - Validation Accuracy: 0.4781\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 36.4545\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5774837248797057, 6: 0.7704460966542751}\n",
      "Validation Loss: 469.2678 - Validation Accuracy: 0.4838\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 32.5955\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.52405887347863, 6: 0.8210609093508722}\n",
      "Validation Loss: 469.4529 - Validation Accuracy: 0.4838\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 33.6278\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.34170676478913103, 6: 0.7933228481555619}\n",
      "Validation Loss: 467.7895 - Validation Accuracy: 0.4831\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 33.6684\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.49936314746674215, 6: 0.7404203603088362}\n",
      "Validation Loss: 469.2749 - Validation Accuracy: 0.4835\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 35.5676\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5382819133880554, 6: 0.6894480983700314}\n",
      "Validation Loss: 473.8327 - Validation Accuracy: 0.4778\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 34.3444\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5992074724030569, 6: 0.6607806691449815}\n",
      "Validation Loss: 469.4121 - Validation Accuracy: 0.4883\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 34.9176\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5877441268044155, 6: 0.6918787532170432}\n",
      "Validation Loss: 470.1192 - Validation Accuracy: 0.4859\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 33.2767\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.4029861307670535, 6: 0.8102659422362024}\n",
      "Validation Loss: 467.1201 - Validation Accuracy: 0.4814\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 34.8523\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6076280781205774, 6: 0.632613668859022}\n",
      "Validation Loss: 470.4722 - Validation Accuracy: 0.4833\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 32.2126\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6233371072742712, 6: 0.6697169002001716}\n",
      "Validation Loss: 465.5067 - Validation Accuracy: 0.4885\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 32.9651\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6043022926691197, 6: 0.6326851587074636}\n",
      "Validation Loss: 475.7563 - Validation Accuracy: 0.4859\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 32.9065\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5355929804698556, 6: 0.8209894195024307}\n",
      "Validation Loss: 469.0271 - Validation Accuracy: 0.4879\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 31.9356\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6003396546844042, 6: 0.7262653703174149}\n",
      "Validation Loss: 468.1775 - Validation Accuracy: 0.4883\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 31.5831\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6537645060854798, 6: 0.5927938232770946}\n",
      "Validation Loss: 468.7845 - Validation Accuracy: 0.4859\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 34.1972\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7869374469289555, 6: 0.5265942236202459}\n",
      "Validation Loss: 470.4952 - Validation Accuracy: 0.4883\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 33.1074\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6082649306538352, 6: 0.714612525021447}\n",
      "Validation Loss: 466.0414 - Validation Accuracy: 0.4855\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 31.5403\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6345174073025757, 6: 0.7805261652845296}\n",
      "Validation Loss: 468.2760 - Validation Accuracy: 0.4884\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 32.0127\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6179592414378715, 6: 0.6584929939948527}\n",
      "Validation Loss: 466.3979 - Validation Accuracy: 0.4886\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 31.9612\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.8326493065383527, 6: 0.5641978839004861}\n",
      "Validation Loss: 465.6297 - Validation Accuracy: 0.4856\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 30.9711\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.8218228134729691, 6: 0.7006005147269088}\n",
      "Validation Loss: 467.9817 - Validation Accuracy: 0.4898\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 33.6355\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6381262383243702, 6: 0.6317557906777238}\n",
      "Validation Loss: 459.4641 - Validation Accuracy: 0.4884\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 30.2652\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7937305406170393, 6: 0.4487417786674292}\n",
      "Validation Loss: 477.0444 - Validation Accuracy: 0.4858\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 29.9969\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.009361997226074895, 5: 0.6832012454005095, 6: 0.6115241635687733}\n",
      "Validation Loss: 466.5864 - Validation Accuracy: 0.4959\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 32.2205\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7343617322388905, 6: 0.5046468401486989}\n",
      "Validation Loss: 468.4223 - Validation Accuracy: 0.4881\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 30.0723\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.806679875459949, 6: 0.42264798398627396}\n",
      "Validation Loss: 467.4506 - Validation Accuracy: 0.4889\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 30.2972\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5608547976224172, 6: 0.6887331998856162}\n",
      "Validation Loss: 464.6402 - Validation Accuracy: 0.4924\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 30.4450\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6326776110953863, 6: 0.6502716614240778}\n",
      "Validation Loss: 472.7751 - Validation Accuracy: 0.4874\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 29.3348\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.007142857142857143, 5: 0.7932352108689499, 6: 0.4435230197311982}\n",
      "Validation Loss: 468.2086 - Validation Accuracy: 0.4869\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 29.0426\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7192187942258703, 6: 0.5325278810408922}\n",
      "Validation Loss: 475.5544 - Validation Accuracy: 0.4915\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 29.6633\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.01650485436893204, 5: 0.8707189357486556, 6: 0.432942522161853}\n",
      "Validation Loss: 477.5266 - Validation Accuracy: 0.4849\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 30.4237\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.01650485436893204, 5: 0.6809368808378149, 6: 0.5025736345438948}\n",
      "Validation Loss: 480.0248 - Validation Accuracy: 0.4830\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 31.8283\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.7364845740164166, 6: 0.5130826422647984}\n",
      "Validation Loss: 466.3967 - Validation Accuracy: 0.4894\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 30.4249\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7461788848004529, 6: 0.4332284815556191}\n",
      "Validation Loss: 468.7375 - Validation Accuracy: 0.4878\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 30.8932\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7275686385508067, 6: 0.5021446954532457}\n",
      "Validation Loss: 466.6490 - Validation Accuracy: 0.4899\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n",
      "Epoch 41/60 - Train Loss: 30.9815\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.8442541749221625, 6: 0.3164140692021733}\n",
      "Validation Loss: 471.6371 - Validation Accuracy: 0.4876\n",
      "---------------\n",
      "Epoch 42/60\n",
      "---------------\n",
      "Epoch 42/60 - Train Loss: 29.2753\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7908293235210869, 6: 0.4180726336860166}\n",
      "Validation Loss: 471.4702 - Validation Accuracy: 0.4840\n",
      "---------------\n",
      "Epoch 43/60\n",
      "---------------\n",
      "Epoch 43/60 - Train Loss: 29.1391\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.01872399445214979, 5: 0.8238748938579111, 6: 0.47190448956248215}\n",
      "Validation Loss: 472.9202 - Validation Accuracy: 0.4881\n",
      "---------------\n",
      "Epoch 44/60\n",
      "---------------\n",
      "Epoch 44/60 - Train Loss: 31.7263\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.8218228134729691, 6: 0.3711752931083786}\n",
      "Validation Loss: 469.4481 - Validation Accuracy: 0.4904\n",
      "---------------\n",
      "Epoch 45/60\n",
      "---------------\n",
      "Epoch 45/60 - Train Loss: 29.1766\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.023162274618585298, 5: 0.7491508632889895, 6: 0.42379182156133827}\n",
      "Validation Loss: 471.7615 - Validation Accuracy: 0.4865\n",
      "---------------\n",
      "Epoch 46/60\n",
      "---------------\n",
      "Epoch 46/60 - Train Loss: 29.7874\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.01872399445214979, 5: 0.8279082932352109, 6: 0.39905633400057194}\n",
      "Validation Loss: 477.3344 - Validation Accuracy: 0.4815\n",
      "---------------\n",
      "Epoch 47/60\n",
      "---------------\n",
      "Epoch 47/60 - Train Loss: 29.3159\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.01650485436893204, 5: 0.7422870082083215, 6: 0.43094080640549043}\n",
      "Validation Loss: 473.6377 - Validation Accuracy: 0.4840\n",
      "---------------\n",
      "Epoch 48/60\n",
      "---------------\n",
      "Epoch 48/60 - Train Loss: 28.2250\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7589866968581942, 6: 0.3597369173577352}\n",
      "Validation Loss: 460.7141 - Validation Accuracy: 0.4974\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 49/60\n",
      "---------------\n",
      "Epoch 49/60 - Train Loss: 30.2964\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.013800277392510402, 5: 0.7570053778658364, 6: 0.37189019159279385}\n",
      "Validation Loss: 470.2330 - Validation Accuracy: 0.4885\n",
      "---------------\n",
      "Epoch 50/60\n",
      "---------------\n",
      "Epoch 50/60 - Train Loss: 28.1002\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.01650485436893204, 5: 0.888055476931786, 6: 0.2647269087789534}\n",
      "Validation Loss: 480.3249 - Validation Accuracy: 0.4848\n",
      "---------------\n",
      "Epoch 51/60\n",
      "---------------\n",
      "Epoch 51/60 - Train Loss: 28.6412\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.014285714285714285, 5: 0.7785876026040193, 6: 0.36702888189877036}\n",
      "Validation Loss: 468.9594 - Validation Accuracy: 0.4875\n",
      "---------------\n",
      "Epoch 52/60\n",
      "---------------\n",
      "Epoch 52/60 - Train Loss: 28.6571\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.02538141470180305, 5: 0.8192754033399378, 6: 0.2614383757506434}\n",
      "Validation Loss: 473.8181 - Validation Accuracy: 0.4865\n",
      "---------------\n",
      "Epoch 53/60\n",
      "---------------\n",
      "Epoch 53/60 - Train Loss: 27.3519\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.01650485436893204, 5: 0.7315312765355222, 6: 0.451315413211324}\n",
      "Validation Loss: 473.3127 - Validation Accuracy: 0.4889\n",
      "---------------\n",
      "Epoch 54/60\n",
      "---------------\n",
      "Epoch 54/60 - Train Loss: 28.9426\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.023162274618585298, 5: 0.8639258420605718, 6: 0.2571489848441521}\n",
      "Validation Loss: 466.6322 - Validation Accuracy: 0.4939\n",
      "---------------\n",
      "Epoch 55/60\n",
      "---------------\n",
      "Epoch 55/60 - Train Loss: 28.5278\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.023162274618585298, 5: 0.7136286442117181, 6: 0.3127680869316557}\n",
      "Validation Loss: 466.2921 - Validation Accuracy: 0.4909\n",
      "---------------\n",
      "Epoch 56/60\n",
      "---------------\n",
      "Epoch 56/60 - Train Loss: 27.6543\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.020943134535367544, 5: 0.8092980469855646, 6: 0.38168430082928223}\n",
      "Validation Loss: 464.8539 - Validation Accuracy: 0.4878\n",
      "---------------\n",
      "Epoch 57/60\n",
      "---------------\n",
      "Epoch 57/60 - Train Loss: 29.2914\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.009361997226074895, 5: 0.8151712425700538, 6: 0.3940520446096654}\n",
      "Validation Loss: 455.4320 - Validation Accuracy: 0.4916\n",
      "---------------\n",
      "Epoch 58/60\n",
      "---------------\n",
      "Epoch 58/60 - Train Loss: 27.4772\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.013800277392510402, 5: 0.7069063119162186, 6: 0.4892765227337718}\n",
      "Validation Loss: 465.3752 - Validation Accuracy: 0.4888\n",
      "---------------\n",
      "Epoch 59/60\n",
      "---------------\n",
      "Epoch 59/60 - Train Loss: 27.6469\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.020943134535367544, 5: 0.8429097084630626, 6: 0.298827566485559}\n",
      "Validation Loss: 467.3394 - Validation Accuracy: 0.4919\n",
      "---------------\n",
      "Epoch 60/60\n",
      "---------------\n",
      "Epoch 60/60 - Train Loss: 27.7422\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.020943134535367544, 5: 0.7942258703651288, 6: 0.24413783242779524}\n",
      "Validation Loss: 465.4557 - Validation Accuracy: 0.4919\n",
      "Loss on test set: 469.0488\n",
      "Accuracy on test set: 0.4919\n",
      "Incremental ARC Classifier added, current len: 8\n",
      "Incremental Task Layer added, current len: 8\n",
      "tsk: 7\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0', 'incremental_layers.1', 'incremental_layers.2', 'incremental_layers.3', 'incremental_layers.4', 'incremental_layers.5', 'incremental_layers.6', 'arc_classifiers.0.4', 'arc_classifiers.1.4', 'arc_classifiers.2.4', 'arc_classifiers.3.4', 'arc_classifiers.4.4', 'arc_classifiers.5.4', 'arc_classifiers.5.4']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "----Freezing incremental_layers.1.gate_generator.0.weight\n",
      "----Freezing incremental_layers.1.gate_generator.0.bias\n",
      "----Freezing incremental_layers.1.gate_generator.1.weight\n",
      "----Freezing incremental_layers.1.gate_generator.1.bias\n",
      "----Freezing incremental_layers.1.gate_generator.5.weight\n",
      "----Freezing incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "----Freezing incremental_layers.2.gate_generator.0.weight\n",
      "----Freezing incremental_layers.2.gate_generator.0.bias\n",
      "----Freezing incremental_layers.2.gate_generator.1.weight\n",
      "----Freezing incremental_layers.2.gate_generator.1.bias\n",
      "----Freezing incremental_layers.2.gate_generator.5.weight\n",
      "----Freezing incremental_layers.2.gate_generator.5.bias\n",
      "----Freezing incremental_layers.3.layer.0.weight\n",
      "----Freezing incremental_layers.3.layer.0.bias\n",
      "----Freezing incremental_layers.3.layer.1.weight\n",
      "----Freezing incremental_layers.3.layer.1.bias\n",
      "----Freezing incremental_layers.3.layer.3.weight\n",
      "----Freezing incremental_layers.3.layer.3.bias\n",
      "----Freezing incremental_layers.3.layer.4.weight\n",
      "----Freezing incremental_layers.3.layer.4.bias\n",
      "----Freezing incremental_layers.3.layer.6.weight\n",
      "----Freezing incremental_layers.3.layer.6.bias\n",
      "----Freezing incremental_layers.3.layer.7.weight\n",
      "----Freezing incremental_layers.3.layer.7.bias\n",
      "----Freezing incremental_layers.3.gate_generator.0.weight\n",
      "----Freezing incremental_layers.3.gate_generator.0.bias\n",
      "----Freezing incremental_layers.3.gate_generator.1.weight\n",
      "----Freezing incremental_layers.3.gate_generator.1.bias\n",
      "----Freezing incremental_layers.3.gate_generator.5.weight\n",
      "----Freezing incremental_layers.3.gate_generator.5.bias\n",
      "----Freezing incremental_layers.4.layer.0.weight\n",
      "----Freezing incremental_layers.4.layer.0.bias\n",
      "----Freezing incremental_layers.4.layer.1.weight\n",
      "----Freezing incremental_layers.4.layer.1.bias\n",
      "----Freezing incremental_layers.4.layer.3.weight\n",
      "----Freezing incremental_layers.4.layer.3.bias\n",
      "----Freezing incremental_layers.4.layer.4.weight\n",
      "----Freezing incremental_layers.4.layer.4.bias\n",
      "----Freezing incremental_layers.4.layer.6.weight\n",
      "----Freezing incremental_layers.4.layer.6.bias\n",
      "----Freezing incremental_layers.4.layer.7.weight\n",
      "----Freezing incremental_layers.4.layer.7.bias\n",
      "----Freezing incremental_layers.4.gate_generator.0.weight\n",
      "----Freezing incremental_layers.4.gate_generator.0.bias\n",
      "----Freezing incremental_layers.4.gate_generator.1.weight\n",
      "----Freezing incremental_layers.4.gate_generator.1.bias\n",
      "----Freezing incremental_layers.4.gate_generator.5.weight\n",
      "----Freezing incremental_layers.4.gate_generator.5.bias\n",
      "----Freezing incremental_layers.5.layer.0.weight\n",
      "----Freezing incremental_layers.5.layer.0.bias\n",
      "----Freezing incremental_layers.5.layer.1.weight\n",
      "----Freezing incremental_layers.5.layer.1.bias\n",
      "----Freezing incremental_layers.5.layer.3.weight\n",
      "----Freezing incremental_layers.5.layer.3.bias\n",
      "----Freezing incremental_layers.5.layer.4.weight\n",
      "----Freezing incremental_layers.5.layer.4.bias\n",
      "----Freezing incremental_layers.5.layer.6.weight\n",
      "----Freezing incremental_layers.5.layer.6.bias\n",
      "----Freezing incremental_layers.5.layer.7.weight\n",
      "----Freezing incremental_layers.5.layer.7.bias\n",
      "----Freezing incremental_layers.5.gate_generator.0.weight\n",
      "----Freezing incremental_layers.5.gate_generator.0.bias\n",
      "----Freezing incremental_layers.5.gate_generator.1.weight\n",
      "----Freezing incremental_layers.5.gate_generator.1.bias\n",
      "----Freezing incremental_layers.5.gate_generator.5.weight\n",
      "----Freezing incremental_layers.5.gate_generator.5.bias\n",
      "----Freezing incremental_layers.6.layer.0.weight\n",
      "----Freezing incremental_layers.6.layer.0.bias\n",
      "----Freezing incremental_layers.6.layer.1.weight\n",
      "----Freezing incremental_layers.6.layer.1.bias\n",
      "----Freezing incremental_layers.6.layer.3.weight\n",
      "----Freezing incremental_layers.6.layer.3.bias\n",
      "----Freezing incremental_layers.6.layer.4.weight\n",
      "----Freezing incremental_layers.6.layer.4.bias\n",
      "----Freezing incremental_layers.6.layer.6.weight\n",
      "----Freezing incremental_layers.6.layer.6.bias\n",
      "----Freezing incremental_layers.6.layer.7.weight\n",
      "----Freezing incremental_layers.6.layer.7.bias\n",
      "----Freezing incremental_layers.6.gate_generator.0.weight\n",
      "----Freezing incremental_layers.6.gate_generator.0.bias\n",
      "----Freezing incremental_layers.6.gate_generator.1.weight\n",
      "----Freezing incremental_layers.6.gate_generator.1.bias\n",
      "----Freezing incremental_layers.6.gate_generator.5.weight\n",
      "----Freezing incremental_layers.6.gate_generator.5.bias\n",
      "incremental_layers.7.layer.0.weight\n",
      "incremental_layers.7.layer.0.bias\n",
      "incremental_layers.7.layer.1.weight\n",
      "incremental_layers.7.layer.1.bias\n",
      "incremental_layers.7.layer.3.weight\n",
      "incremental_layers.7.layer.3.bias\n",
      "incremental_layers.7.layer.4.weight\n",
      "incremental_layers.7.layer.4.bias\n",
      "incremental_layers.7.layer.6.weight\n",
      "incremental_layers.7.layer.6.bias\n",
      "incremental_layers.7.layer.7.weight\n",
      "incremental_layers.7.layer.7.bias\n",
      "incremental_layers.7.gate_generator.0.weight\n",
      "incremental_layers.7.gate_generator.0.bias\n",
      "incremental_layers.7.gate_generator.1.weight\n",
      "incremental_layers.7.gate_generator.1.bias\n",
      "incremental_layers.7.gate_generator.5.weight\n",
      "incremental_layers.7.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "----Freezing arc_classifiers.2.4.weight\n",
      "arc_classifiers.3.2.weight\n",
      "arc_classifiers.3.2.bias\n",
      "----Freezing arc_classifiers.3.4.weight\n",
      "arc_classifiers.4.2.weight\n",
      "arc_classifiers.4.2.bias\n",
      "----Freezing arc_classifiers.4.4.weight\n",
      "arc_classifiers.5.2.weight\n",
      "arc_classifiers.5.2.bias\n",
      "----Freezing arc_classifiers.5.4.weight\n",
      "----Freezing arc_classifiers.5.4.weight\n",
      "arc_classifiers.6.2.weight\n",
      "arc_classifiers.6.2.bias\n",
      "arc_classifiers.6.4.weight\n",
      "arc_classifiers.7.2.weight\n",
      "arc_classifiers.7.2.bias\n",
      "arc_classifiers.7.4.weight\n",
      "alpha_cls: [204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      "   8.19993276   8.19993276   8.19993276   8.19993276   8.19993276]\n",
      "alpha_tsk: [ 4.09998319 40.99831907 40.99831907 40.99831907 40.99831907 40.99831907\n",
      " 40.99831907  1.63999731]\n",
      "num_prev_task_classes: 80\n",
      "alpha_cls: [204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      " 204.95798361 204.95798361 204.95798361 204.95798361 204.95798361\n",
      "   8.19993276   8.19993276   8.19993276   8.19993276   8.19993276]\n",
      "alpha_tsk: [ 25.197518   251.9658825  251.9658825  251.9658825  251.9658825\n",
      " 251.9658825  251.9658825   10.07903199]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "cb7c40fb3a0d4311b652bb7addc249c6",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1980 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n",
      "Epoch 1/60 - Train Loss: 62.9895\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5767761109538636, 6: 0.09136402630826422, 7: 0.892622247640835}\n",
      "Validation Loss: 506.7391 - Validation Accuracy: 0.4460\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 75.4166\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6396829889612228, 6: 0.1204603946239634, 7: 0.7610094366599943}\n",
      "Validation Loss: 514.9340 - Validation Accuracy: 0.4361\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 59.5372\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6086894990093406, 6: 0.12846725764941377, 7: 0.811838718901916}\n",
      "Validation Loss: 514.4526 - Validation Accuracy: 0.4411\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 58.5928\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5864704217378998, 6: 0.13533028309979983, 7: 0.7958249928510152}\n",
      "Validation Loss: 500.2163 - Validation Accuracy: 0.4472\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 68.3994\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.5873903198414945, 6: 0.11130969402344867, 7: 0.811838718901916}\n",
      "Validation Loss: 501.3338 - Validation Accuracy: 0.4504\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 49.1341\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.6562411548259269, 6: 0.15456105233056905, 7: 0.6486273949099228}\n",
      "Validation Loss: 499.5891 - Validation Accuracy: 0.4502\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 50.6432\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.6207189357486556, 6: 0.12825278810408922, 7: 0.7032456391192451}\n",
      "Validation Loss: 491.7601 - Validation Accuracy: 0.4604\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 53.4978\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.5920605717520521, 6: 0.11538461538461539, 7: 0.7467829568201315}\n",
      "Validation Loss: 503.3865 - Validation Accuracy: 0.4504\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 59.6460\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6050099065949618, 6: 0.11359736917357735, 7: 0.6552044609665427}\n",
      "Validation Loss: 506.2596 - Validation Accuracy: 0.4461\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 59.6913\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.6268044155108973, 6: 0.16557048899056334, 7: 0.6457678009722619}\n",
      "Validation Loss: 502.1300 - Validation Accuracy: 0.4506\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 51.2253\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.7044296631757713, 6: 0.22211895910780668, 7: 0.436374034887046}\n",
      "Validation Loss: 497.2313 - Validation Accuracy: 0.4460\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 54.9136\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.6942400226436456, 6: 0.19166428367171862, 7: 0.48034029167858167}\n",
      "Validation Loss: 492.1001 - Validation Accuracy: 0.4526\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 48.9828\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.6985564675912822, 6: 0.18480125822133256, 7: 0.505075779239348}\n",
      "Validation Loss: 493.8821 - Validation Accuracy: 0.4619\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 60.2237\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.6879422587036513, 6: 0.216399771232485, 7: 0.5213039748355733}\n",
      "Validation Loss: 491.9511 - Validation Accuracy: 0.4545\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 58.5473\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.725304273988112, 6: 0.21432656562768088, 7: 0.48670288818987706}\n",
      "Validation Loss: 488.1153 - Validation Accuracy: 0.4545\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 45.3962\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7619586753467308, 6: 0.18708893337146126, 7: 0.38304260794967115}\n",
      "Validation Loss: 486.3953 - Validation Accuracy: 0.4578\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 46.5974\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.729054627795075, 6: 0.2120388904775522, 7: 0.4043465827852445}\n",
      "Validation Loss: 492.4643 - Validation Accuracy: 0.4602\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 46.8770\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.743136144919332, 6: 0.21568487274806977, 7: 0.44974263654561053}\n",
      "Validation Loss: 491.7033 - Validation Accuracy: 0.4525\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 54.5030\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7336541183130484, 6: 0.1630683442951101, 7: 0.5105804975693452}\n",
      "Validation Loss: 486.4304 - Validation Accuracy: 0.4621\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 48.7266\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.7478771582224738, 6: 0.239991421218187, 7: 0.36702888189877036}\n",
      "Validation Loss: 486.9182 - Validation Accuracy: 0.4575\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 64.6338\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.743136144919332, 6: 0.23105519016299686, 7: 0.27266228195596226}\n",
      "Validation Loss: 479.2301 - Validation Accuracy: 0.4612\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 46.5869\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.7657090291536938, 6: 0.22190448956248213, 7: 0.2584358021160995}\n",
      "Validation Loss: 491.9980 - Validation Accuracy: 0.4527\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 53.5514\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7505660911406736, 6: 0.22326279668287102, 7: 0.2923934801258221}\n",
      "Validation Loss: 494.1452 - Validation Accuracy: 0.4567\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 48.8184\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7495046702519106, 6: 0.2560051472690878, 7: 0.18036888761795825}\n",
      "Validation Loss: 486.2308 - Validation Accuracy: 0.4591\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 47.6358\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7376875176903481, 6: 0.23427223334286532, 7: 0.3420074349442379}\n",
      "Validation Loss: 488.7801 - Validation Accuracy: 0.4573\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 54.2246\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7723606000566091, 6: 0.236559908492994, 7: 0.2838861881612811}\n",
      "Validation Loss: 493.5324 - Validation Accuracy: 0.4579\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 53.3737\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7372629493348429, 6: 0.23312839576780098, 7: 0.33814698312839575}\n",
      "Validation Loss: 495.5172 - Validation Accuracy: 0.4559\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 43.8441\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.752405887347863, 6: 0.2445667715184444, 7: 0.3127680869316557}\n",
      "Validation Loss: 488.8530 - Validation Accuracy: 0.4607\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 53.4322\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.7249504670251911, 6: 0.23448670288818987, 7: 0.46225336002287676}\n",
      "Validation Loss: 492.5674 - Validation Accuracy: 0.4613\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 50.3946\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7623124823096519, 6: 0.22576494137832429, 7: 0.27066056619959966}\n",
      "Validation Loss: 501.2855 - Validation Accuracy: 0.4539\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 47.3398\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7521936031701104, 6: 0.22097512153274235, 7: 0.34572490706319703}\n",
      "Validation Loss: 489.7921 - Validation Accuracy: 0.4609\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 45.4735\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7225445796773281, 6: 0.23448670288818987, 7: 0.4091364026308264}\n",
      "Validation Loss: 497.8261 - Validation Accuracy: 0.4549\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 60.4480\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.7435607132748373, 6: 0.2168287103231341, 7: 0.3612382041750071}\n",
      "Validation Loss: 498.9048 - Validation Accuracy: 0.4559\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 45.0980\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.6659354656099632, 6: 0.17472118959107807, 7: 0.5824277952530741}\n",
      "Validation Loss: 494.4112 - Validation Accuracy: 0.4573\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 50.4298\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.6832720067930936, 6: 0.18201315413211325, 7: 0.5517586502716614}\n",
      "Validation Loss: 490.0839 - Validation Accuracy: 0.4615\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 44.3780\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.6835550523634305, 6: 0.21318272805261654, 7: 0.48970546182442093}\n",
      "Validation Loss: 496.3875 - Validation Accuracy: 0.4589\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 45.2144\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7408717803566374, 6: 0.2168287103231341, 7: 0.4265084358021161}\n",
      "Validation Loss: 494.3017 - Validation Accuracy: 0.4602\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 42.0558\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.7110104726861025, 6: 0.22169002001715757, 7: 0.5009293680297398}\n",
      "Validation Loss: 498.0909 - Validation Accuracy: 0.4562\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 63.9875\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.008876560332871012, 5: 0.694027738465893, 6: 0.21182442093222761, 7: 0.5249499571060909}\n",
      "Validation Loss: 487.4151 - Validation Accuracy: 0.4661\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 46.4801\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.7696009057458251, 6: 0.24342293394338005, 7: 0.16185301687160422}\n",
      "Validation Loss: 487.5893 - Validation Accuracy: 0.4604\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n",
      "Epoch 41/60 - Train Loss: 48.9875\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7558024341919048, 6: 0.24549613954818417, 7: 0.1563482985416071}\n",
      "Validation Loss: 490.9832 - Validation Accuracy: 0.4586\n",
      "---------------\n",
      "Epoch 42/60\n",
      "---------------\n",
      "Epoch 42/60 - Train Loss: 43.6597\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7578545145768468, 6: 0.236559908492994, 7: 0.2181155275950815}\n",
      "Validation Loss: 490.5053 - Validation Accuracy: 0.4607\n",
      "---------------\n",
      "Epoch 43/60\n",
      "---------------\n",
      "Epoch 43/60 - Train Loss: 42.7581\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.7569346164732522, 6: 0.27230483271375466, 7: 0.05747783814698313}\n",
      "Validation Loss: 494.9059 - Validation Accuracy: 0.4589\n",
      "---------------\n",
      "Epoch 44/60\n",
      "---------------\n",
      "Epoch 44/60 - Train Loss: 48.3273\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7618879139541466, 6: 0.24113525879325134, 7: 0.20353159851301114}\n",
      "Validation Loss: 487.1064 - Validation Accuracy: 0.4659\n",
      "---------------\n",
      "Epoch 45/60\n",
      "---------------\n",
      "Epoch 45/60 - Train Loss: 43.8350\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.7621709595244834, 6: 0.22991135258793252, 7: 0.2828138404346583}\n",
      "Validation Loss: 486.6642 - Validation Accuracy: 0.4593\n",
      "---------------\n",
      "Epoch 46/60\n",
      "---------------\n",
      "Epoch 46/60 - Train Loss: 47.6104\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.8073874893857911, 6: 0.24871318272805262, 7: 0.023806119531026595}\n",
      "Validation Loss: 488.8651 - Validation Accuracy: 0.4586\n",
      "---------------\n",
      "Epoch 47/60\n",
      "---------------\n",
      "Epoch 47/60 - Train Loss: 43.3970\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7954288140390603, 6: 0.23677437803831855, 7: 0.009150700600514728}\n",
      "Validation Loss: 489.9667 - Validation Accuracy: 0.4568\n",
      "---------------\n",
      "Epoch 48/60\n",
      "---------------\n",
      "Epoch 48/60 - Train Loss: 45.2974\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.008876560332871012, 5: 0.7940135861873762, 6: 0.2571489848441521, 7: 0.045324563911924506}\n",
      "Validation Loss: 494.1249 - Validation Accuracy: 0.4611\n",
      "---------------\n",
      "Epoch 49/60\n",
      "---------------\n",
      "Epoch 49/60 - Train Loss: 44.4905\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.8046985564675913, 6: 0.24728338575922218, 7: 0.024949957106090936}\n",
      "Validation Loss: 486.7697 - Validation Accuracy: 0.4631\n",
      "---------------\n",
      "Epoch 50/60\n",
      "---------------\n",
      "Epoch 50/60 - Train Loss: 42.3898\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.7793659779224456, 6: 0.2592221904489562, 7: 0.0879325135830712}\n",
      "Validation Loss: 492.0635 - Validation Accuracy: 0.4616\n",
      "---------------\n",
      "Epoch 51/60\n",
      "---------------\n",
      "Epoch 51/60 - Train Loss: 43.8186\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.7787291253891877, 6: 0.24342293394338005, 7: 0.07077494995710609}\n",
      "Validation Loss: 490.7848 - Validation Accuracy: 0.4639\n",
      "---------------\n",
      "Epoch 52/60\n",
      "---------------\n",
      "Epoch 52/60 - Train Loss: 58.9135\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7904047551655816, 6: 0.24592507863883328, 7: 0.041893051186731485}\n",
      "Validation Loss: 492.0034 - Validation Accuracy: 0.4595\n",
      "---------------\n",
      "Epoch 53/60\n",
      "---------------\n",
      "Epoch 53/60 - Train Loss: 43.3537\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7836116614774978, 6: 0.24706891621389762, 7: 0.10916499857020304}\n",
      "Validation Loss: 476.6296 - Validation Accuracy: 0.4668\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 54/60\n",
      "---------------\n",
      "Epoch 54/60 - Train Loss: 43.1703\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.783682422870082, 6: 0.23884758364312267, 7: 0.2231913068344295}\n",
      "Validation Loss: 484.4729 - Validation Accuracy: 0.4647\n",
      "---------------\n",
      "Epoch 55/60\n",
      "---------------\n",
      "Epoch 55/60 - Train Loss: 42.9490\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.008876560332871012, 5: 0.8126238324370224, 6: 0.22304832713754646, 7: 0.1721475550471833}\n",
      "Validation Loss: 487.2493 - Validation Accuracy: 0.4661\n",
      "---------------\n",
      "Epoch 56/60\n",
      "---------------\n",
      "Epoch 56/60 - Train Loss: 61.6085\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.008876560332871012, 5: 0.7949334842909709, 6: 0.20081498427223335, 7: 0.19345152988275666}\n",
      "Validation Loss: 481.4085 - Validation Accuracy: 0.4680\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 57/60\n",
      "---------------\n",
      "Epoch 57/60 - Train Loss: 42.1282\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.7378998018681008, 6: 0.239991421218187, 7: 0.22733771804403774}\n",
      "Validation Loss: 495.3976 - Validation Accuracy: 0.4536\n",
      "---------------\n",
      "Epoch 58/60\n",
      "---------------\n",
      "Epoch 58/60 - Train Loss: 44.2498\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.008876560332871012, 5: 0.765779790546278, 6: 0.22876751501286818, 7: 0.17400629110666285}\n",
      "Validation Loss: 490.3246 - Validation Accuracy: 0.4651\n",
      "---------------\n",
      "Epoch 59/60\n",
      "---------------\n",
      "Epoch 59/60 - Train Loss: 43.1087\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.7727851684121143, 6: 0.23177008864741205, 7: 0.14469545324563912}\n",
      "Validation Loss: 487.4988 - Validation Accuracy: 0.4626\n",
      "---------------\n",
      "Epoch 60/60\n",
      "---------------\n",
      "Epoch 60/60 - Train Loss: 53.4250\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.785026889329182, 6: 0.22898198455819274, 7: 0.11059479553903345}\n",
      "Validation Loss: 485.6507 - Validation Accuracy: 0.4674\n",
      "Loss on test set: 489.7888\n",
      "Accuracy on test set: 0.4674\n",
      "Incremental ARC Classifier added, current len: 9\n",
      "Incremental Task Layer added, current len: 9\n",
      "tsk: 8\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0', 'incremental_layers.1', 'incremental_layers.2', 'incremental_layers.3', 'incremental_layers.4', 'incremental_layers.5', 'incremental_layers.6', 'incremental_layers.7', 'arc_classifiers.0.4', 'arc_classifiers.1.4', 'arc_classifiers.2.4', 'arc_classifiers.3.4', 'arc_classifiers.4.4', 'arc_classifiers.5.4', 'arc_classifiers.6.4', 'arc_classifiers.6.4']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "----Freezing incremental_layers.1.gate_generator.0.weight\n",
      "----Freezing incremental_layers.1.gate_generator.0.bias\n",
      "----Freezing incremental_layers.1.gate_generator.1.weight\n",
      "----Freezing incremental_layers.1.gate_generator.1.bias\n",
      "----Freezing incremental_layers.1.gate_generator.5.weight\n",
      "----Freezing incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "----Freezing incremental_layers.2.gate_generator.0.weight\n",
      "----Freezing incremental_layers.2.gate_generator.0.bias\n",
      "----Freezing incremental_layers.2.gate_generator.1.weight\n",
      "----Freezing incremental_layers.2.gate_generator.1.bias\n",
      "----Freezing incremental_layers.2.gate_generator.5.weight\n",
      "----Freezing incremental_layers.2.gate_generator.5.bias\n",
      "----Freezing incremental_layers.3.layer.0.weight\n",
      "----Freezing incremental_layers.3.layer.0.bias\n",
      "----Freezing incremental_layers.3.layer.1.weight\n",
      "----Freezing incremental_layers.3.layer.1.bias\n",
      "----Freezing incremental_layers.3.layer.3.weight\n",
      "----Freezing incremental_layers.3.layer.3.bias\n",
      "----Freezing incremental_layers.3.layer.4.weight\n",
      "----Freezing incremental_layers.3.layer.4.bias\n",
      "----Freezing incremental_layers.3.layer.6.weight\n",
      "----Freezing incremental_layers.3.layer.6.bias\n",
      "----Freezing incremental_layers.3.layer.7.weight\n",
      "----Freezing incremental_layers.3.layer.7.bias\n",
      "----Freezing incremental_layers.3.gate_generator.0.weight\n",
      "----Freezing incremental_layers.3.gate_generator.0.bias\n",
      "----Freezing incremental_layers.3.gate_generator.1.weight\n",
      "----Freezing incremental_layers.3.gate_generator.1.bias\n",
      "----Freezing incremental_layers.3.gate_generator.5.weight\n",
      "----Freezing incremental_layers.3.gate_generator.5.bias\n",
      "----Freezing incremental_layers.4.layer.0.weight\n",
      "----Freezing incremental_layers.4.layer.0.bias\n",
      "----Freezing incremental_layers.4.layer.1.weight\n",
      "----Freezing incremental_layers.4.layer.1.bias\n",
      "----Freezing incremental_layers.4.layer.3.weight\n",
      "----Freezing incremental_layers.4.layer.3.bias\n",
      "----Freezing incremental_layers.4.layer.4.weight\n",
      "----Freezing incremental_layers.4.layer.4.bias\n",
      "----Freezing incremental_layers.4.layer.6.weight\n",
      "----Freezing incremental_layers.4.layer.6.bias\n",
      "----Freezing incremental_layers.4.layer.7.weight\n",
      "----Freezing incremental_layers.4.layer.7.bias\n",
      "----Freezing incremental_layers.4.gate_generator.0.weight\n",
      "----Freezing incremental_layers.4.gate_generator.0.bias\n",
      "----Freezing incremental_layers.4.gate_generator.1.weight\n",
      "----Freezing incremental_layers.4.gate_generator.1.bias\n",
      "----Freezing incremental_layers.4.gate_generator.5.weight\n",
      "----Freezing incremental_layers.4.gate_generator.5.bias\n",
      "----Freezing incremental_layers.5.layer.0.weight\n",
      "----Freezing incremental_layers.5.layer.0.bias\n",
      "----Freezing incremental_layers.5.layer.1.weight\n",
      "----Freezing incremental_layers.5.layer.1.bias\n",
      "----Freezing incremental_layers.5.layer.3.weight\n",
      "----Freezing incremental_layers.5.layer.3.bias\n",
      "----Freezing incremental_layers.5.layer.4.weight\n",
      "----Freezing incremental_layers.5.layer.4.bias\n",
      "----Freezing incremental_layers.5.layer.6.weight\n",
      "----Freezing incremental_layers.5.layer.6.bias\n",
      "----Freezing incremental_layers.5.layer.7.weight\n",
      "----Freezing incremental_layers.5.layer.7.bias\n",
      "----Freezing incremental_layers.5.gate_generator.0.weight\n",
      "----Freezing incremental_layers.5.gate_generator.0.bias\n",
      "----Freezing incremental_layers.5.gate_generator.1.weight\n",
      "----Freezing incremental_layers.5.gate_generator.1.bias\n",
      "----Freezing incremental_layers.5.gate_generator.5.weight\n",
      "----Freezing incremental_layers.5.gate_generator.5.bias\n",
      "----Freezing incremental_layers.6.layer.0.weight\n",
      "----Freezing incremental_layers.6.layer.0.bias\n",
      "----Freezing incremental_layers.6.layer.1.weight\n",
      "----Freezing incremental_layers.6.layer.1.bias\n",
      "----Freezing incremental_layers.6.layer.3.weight\n",
      "----Freezing incremental_layers.6.layer.3.bias\n",
      "----Freezing incremental_layers.6.layer.4.weight\n",
      "----Freezing incremental_layers.6.layer.4.bias\n",
      "----Freezing incremental_layers.6.layer.6.weight\n",
      "----Freezing incremental_layers.6.layer.6.bias\n",
      "----Freezing incremental_layers.6.layer.7.weight\n",
      "----Freezing incremental_layers.6.layer.7.bias\n",
      "----Freezing incremental_layers.6.gate_generator.0.weight\n",
      "----Freezing incremental_layers.6.gate_generator.0.bias\n",
      "----Freezing incremental_layers.6.gate_generator.1.weight\n",
      "----Freezing incremental_layers.6.gate_generator.1.bias\n",
      "----Freezing incremental_layers.6.gate_generator.5.weight\n",
      "----Freezing incremental_layers.6.gate_generator.5.bias\n",
      "----Freezing incremental_layers.7.layer.0.weight\n",
      "----Freezing incremental_layers.7.layer.0.bias\n",
      "----Freezing incremental_layers.7.layer.1.weight\n",
      "----Freezing incremental_layers.7.layer.1.bias\n",
      "----Freezing incremental_layers.7.layer.3.weight\n",
      "----Freezing incremental_layers.7.layer.3.bias\n",
      "----Freezing incremental_layers.7.layer.4.weight\n",
      "----Freezing incremental_layers.7.layer.4.bias\n",
      "----Freezing incremental_layers.7.layer.6.weight\n",
      "----Freezing incremental_layers.7.layer.6.bias\n",
      "----Freezing incremental_layers.7.layer.7.weight\n",
      "----Freezing incremental_layers.7.layer.7.bias\n",
      "----Freezing incremental_layers.7.gate_generator.0.weight\n",
      "----Freezing incremental_layers.7.gate_generator.0.bias\n",
      "----Freezing incremental_layers.7.gate_generator.1.weight\n",
      "----Freezing incremental_layers.7.gate_generator.1.bias\n",
      "----Freezing incremental_layers.7.gate_generator.5.weight\n",
      "----Freezing incremental_layers.7.gate_generator.5.bias\n",
      "incremental_layers.8.layer.0.weight\n",
      "incremental_layers.8.layer.0.bias\n",
      "incremental_layers.8.layer.1.weight\n",
      "incremental_layers.8.layer.1.bias\n",
      "incremental_layers.8.layer.3.weight\n",
      "incremental_layers.8.layer.3.bias\n",
      "incremental_layers.8.layer.4.weight\n",
      "incremental_layers.8.layer.4.bias\n",
      "incremental_layers.8.layer.6.weight\n",
      "incremental_layers.8.layer.6.bias\n",
      "incremental_layers.8.layer.7.weight\n",
      "incremental_layers.8.layer.7.bias\n",
      "incremental_layers.8.gate_generator.0.weight\n",
      "incremental_layers.8.gate_generator.0.bias\n",
      "incremental_layers.8.gate_generator.1.weight\n",
      "incremental_layers.8.gate_generator.1.bias\n",
      "incremental_layers.8.gate_generator.5.weight\n",
      "incremental_layers.8.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "----Freezing arc_classifiers.2.4.weight\n",
      "arc_classifiers.3.2.weight\n",
      "arc_classifiers.3.2.bias\n",
      "----Freezing arc_classifiers.3.4.weight\n",
      "arc_classifiers.4.2.weight\n",
      "arc_classifiers.4.2.bias\n",
      "----Freezing arc_classifiers.4.4.weight\n",
      "arc_classifiers.5.2.weight\n",
      "arc_classifiers.5.2.bias\n",
      "----Freezing arc_classifiers.5.4.weight\n",
      "arc_classifiers.6.2.weight\n",
      "arc_classifiers.6.2.bias\n",
      "----Freezing arc_classifiers.6.4.weight\n",
      "----Freezing arc_classifiers.6.4.weight\n",
      "arc_classifiers.7.2.weight\n",
      "arc_classifiers.7.2.bias\n",
      "arc_classifiers.7.4.weight\n",
      "arc_classifiers.8.2.weight\n",
      "arc_classifiers.8.2.bias\n",
      "arc_classifiers.8.4.weight\n",
      "alpha_cls: [209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      "   8.39992944   8.39992944   8.39992944   8.39992944   8.39992944]\n",
      "alpha_tsk: [ 4.19998236 41.99823607 41.99823607 41.99823607 41.99823607 41.99823607\n",
      " 41.99823607 41.99823607  1.67999718]\n",
      "num_prev_task_classes: 85\n",
      "alpha_cls: [209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      " 209.95590926 209.95590926 209.95590926 209.95590926 209.95590926\n",
      "   8.39992944   8.39992944   8.39992944   8.39992944   8.39992944]\n",
      "alpha_tsk: [ 25.05450224 250.53555215 250.53555215 250.53555215 250.53555215\n",
      " 250.53555215 250.53555215 250.53555215  10.02182615]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "30ba8b1ed2124501a23028442ebb5315",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/1980 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n",
      "Epoch 1/60 - Train Loss: 68.4550\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.024837248797056326, 6: 0.0, 7: 0.0, 8: 0.9563402207755448}\n",
      "Validation Loss: 516.1892 - Validation Accuracy: 0.4388\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 61.1553\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.022148315878856496, 6: 0.0, 7: 0.0, 8: 0.9631333144636286}\n",
      "Validation Loss: 528.6134 - Validation Accuracy: 0.4324\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 58.6224\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.032054910840645344, 6: 0.0, 7: 0.002287675150128682, 8: 0.9230823662609681}\n",
      "Validation Loss: 533.4341 - Validation Accuracy: 0.4287\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 56.6516\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0818709312199264, 6: 0.009150700600514728, 7: 0.010080068630254504, 8: 0.8972544579677328}\n",
      "Validation Loss: 531.6038 - Validation Accuracy: 0.4298\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 54.9017\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.093405038211152, 6: 0.010080068630254504, 7: 0.010080068630254504, 8: 0.8710019813189923}\n",
      "Validation Loss: 530.8284 - Validation Accuracy: 0.4341\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 54.8753\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.03163034248514011, 6: 0.013297111810122962, 7: 0.00550471832999714, 8: 0.9116897820549108}\n",
      "Validation Loss: 528.6186 - Validation Accuracy: 0.4303\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 55.2876\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.04861307670534956, 6: 0.007792393480125822, 7: 0.004575350300257364, 8: 0.911760543447495}\n",
      "Validation Loss: 524.4106 - Validation Accuracy: 0.4368\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 53.3642\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.09234361732238891, 6: 0.02359164998570203, 7: 0.02151844438089791, 8: 0.8707189357486556}\n",
      "Validation Loss: 529.1275 - Validation Accuracy: 0.4312\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 53.5640\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.055901500141522786, 6: 0.04303688876179582, 7: 0.004575350300257364, 8: 0.9027030851967167}\n",
      "Validation Loss: 528.1348 - Validation Accuracy: 0.4332\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 53.4548\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.05476931786017549, 6: 0.01973119816985988, 7: 0.004575350300257364, 8: 0.8915935465609963}\n",
      "Validation Loss: 529.1038 - Validation Accuracy: 0.4329\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 53.2622\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.08795641098216812, 6: 0.0295253074063483, 7: 0.05426079496711467, 8: 0.7758279082932352}\n",
      "Validation Loss: 529.4349 - Validation Accuracy: 0.4336\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 52.8520\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.081729408434758, 6: 0.01694309408064055, 7: 0.09157849585358879, 8: 0.7862298330031136}\n",
      "Validation Loss: 529.7420 - Validation Accuracy: 0.4326\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 52.4994\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0653127653552222, 6: 0.013297111810122962, 7: 0.028381469831283958, 8: 0.8716388338522502}\n",
      "Validation Loss: 537.4646 - Validation Accuracy: 0.4310\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 54.0501\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.044367393150297196, 6: 0.002287675150128682, 7: 0.004575350300257364, 8: 0.9605859043305972}\n",
      "Validation Loss: 530.9192 - Validation Accuracy: 0.4319\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 52.3140\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.15433059722615342, 6: 0.02359164998570203, 7: 0.03410065770660566, 8: 0.8444664590999151}\n",
      "Validation Loss: 528.4126 - Validation Accuracy: 0.4339\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 52.0748\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.15185394848570619, 6: 0.04854160709179296, 7: 0.06455533314269374, 8: 0.7388196999716955}\n",
      "Validation Loss: 529.7580 - Validation Accuracy: 0.4343\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 50.5946\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.07132748372487971, 6: 0.03503002573634544, 7: 0.047826708607377755, 8: 0.8300311350127371}\n",
      "Validation Loss: 526.9985 - Validation Accuracy: 0.4387\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 51.9786\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0934757996037362, 6: 0.005719187875321705, 7: 0.0295253074063483, 8: 0.8596801585055194}\n",
      "Validation Loss: 535.5061 - Validation Accuracy: 0.4310\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 51.7473\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.16260968015850552, 6: 0.058836145267372034, 7: 0.12496425507577924, 8: 0.6472544579677328}\n",
      "Validation Loss: 527.1368 - Validation Accuracy: 0.4371\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 50.4095\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.107486555335409, 6: 0.023377180440377468, 7: 0.040963683156991705, 8: 0.7834701386923295}\n",
      "Validation Loss: 536.2142 - Validation Accuracy: 0.4306\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 49.5767\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.10706198697990377, 6: 0.046253931941664286, 7: 0.032956820131541324, 8: 0.8021511463345599}\n",
      "Validation Loss: 529.8467 - Validation Accuracy: 0.4367\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 51.0428\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.23768751769034815, 6: 0.11059479553903345, 7: 0.044395195882184726, 8: 0.6708180016982734}\n",
      "Validation Loss: 536.1674 - Validation Accuracy: 0.4321\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 49.6671\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.06602037928106425, 6: 0.01694309408064055, 7: 0.02359164998570203, 8: 0.8899660345315595}\n",
      "Validation Loss: 538.4398 - Validation Accuracy: 0.4313\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 50.4972\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.15687800735918483, 6: 0.04282241921647126, 7: 0.07463540177294824, 8: 0.7744834418341353}\n",
      "Validation Loss: 532.5776 - Validation Accuracy: 0.4314\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 49.2870\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.1834135295782621, 6: 0.03617386331140978, 7: 0.02287675150128682, 8: 0.813543730540617}\n",
      "Validation Loss: 541.3193 - Validation Accuracy: 0.4309\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 49.8807\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.14286725162751204, 6: 0.19945667715184442, 7: 0.04303688876179582, 8: 0.7013869232946505}\n",
      "Validation Loss: 531.4814 - Validation Accuracy: 0.4370\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 48.8884\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.072459666006227, 6: 0.05933657420646268, 7: 0.010080068630254504, 8: 0.8170110387772431}\n",
      "Validation Loss: 540.3470 - Validation Accuracy: 0.4291\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 49.4961\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.12694593829606568, 6: 0.04375178724621104, 7: 0.019230769230769232, 8: 0.7846023209736768}\n",
      "Validation Loss: 531.8782 - Validation Accuracy: 0.4333\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 49.0383\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.12220492499292386, 6: 0.036888761795824995, 7: 0.014655418930511867, 8: 0.8000283045570337}\n",
      "Validation Loss: 528.4730 - Validation Accuracy: 0.4357\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 51.0436\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.21568072459666007, 6: 0.08564483843294252, 7: 0.032956820131541324, 8: 0.7130625530710445}\n",
      "Validation Loss: 531.9983 - Validation Accuracy: 0.4306\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 48.3739\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.093405038211152, 6: 0.02016013726050901, 7: 0.01143837575064341, 8: 0.8728417775261818}\n",
      "Validation Loss: 531.4607 - Validation Accuracy: 0.4377\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 49.3104\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.1625389187659213, 6: 0.1564197883900486, 7: 0.051758650271661426, 8: 0.7144777809227285}\n",
      "Validation Loss: 531.2738 - Validation Accuracy: 0.4383\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 47.6813\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.13982451174639116, 6: 0.042393480125822135, 7: 0.031598513011152414, 8: 0.7779507500707614}\n",
      "Validation Loss: 537.3284 - Validation Accuracy: 0.4351\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 48.0107\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.3666147749787716, 6: 0.1191020875035745, 7: 0.06684300829282241, 8: 0.5171242570053779}\n",
      "Validation Loss: 530.0855 - Validation Accuracy: 0.4360\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 49.2340\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.17378998018681008, 6: 0.017157563625965114, 7: 0.026093794681155275, 8: 0.7296914803283329}\n",
      "Validation Loss: 530.3730 - Validation Accuracy: 0.4388\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 49.5962\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.27243136144919333, 6: 0.057048899056334, 7: 0.03066914498141264, 8: 0.6863855080667988}\n",
      "Validation Loss: 532.9666 - Validation Accuracy: 0.4349\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 48.4476\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.26302009623549394, 6: 0.09736917357735202, 7: 0.07828138404346582, 8: 0.5965185394848571}\n",
      "Validation Loss: 538.0479 - Validation Accuracy: 0.4311\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 49.8998\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.14279649023492783, 6: 0.025664855590506148, 7: 0.05997998284243637, 8: 0.7392442683272007}\n",
      "Validation Loss: 537.8336 - Validation Accuracy: 0.4344\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 47.1427\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.10996320407585622, 6: 0.01694309408064055, 7: 0.010294538175579068, 8: 0.785026889329182}\n",
      "Validation Loss: 535.8152 - Validation Accuracy: 0.4349\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 47.9985\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.34623549391452024, 6: 0.14176436945953674, 7: 0.08492993994852731, 8: 0.5212284177752619}\n",
      "Validation Loss: 530.3905 - Validation Accuracy: 0.4331\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n",
      "Epoch 41/60 - Train Loss: 46.7731\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.18284743843758844, 6: 0.04904203603088361, 7: 0.02037460680583357, 8: 0.7153976790263232}\n",
      "Validation Loss: 541.9104 - Validation Accuracy: 0.4301\n",
      "---------------\n",
      "Epoch 42/60\n",
      "---------------\n",
      "Epoch 42/60 - Train Loss: 47.8236\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.10592980469855647, 6: 0.024306548470117244, 7: 0.014655418930511867, 8: 0.7556609114067365}\n",
      "Validation Loss: 532.1399 - Validation Accuracy: 0.4372\n",
      "---------------\n",
      "Epoch 43/60\n",
      "---------------\n",
      "Epoch 43/60 - Train Loss: 48.5715\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.20733088027172375, 6: 0.09322276236774378, 7: 0.08171289676865885, 8: 0.5867534673082366}\n",
      "Validation Loss: 531.9046 - Validation Accuracy: 0.4356\n",
      "---------------\n",
      "Epoch 44/60\n",
      "---------------\n",
      "Epoch 44/60 - Train Loss: 46.5285\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.16296348712142655, 6: 0.022447812410637688, 7: 0.008006863025450386, 8: 0.8231672799320691}\n",
      "Validation Loss: 532.4718 - Validation Accuracy: 0.4393\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 45/60\n",
      "---------------\n",
      "Epoch 45/60 - Train Loss: 46.8736\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.22969148032833286, 6: 0.04032027452101802, 7: 0.018301401201029455, 8: 0.7161760543447495}\n",
      "Validation Loss: 530.6827 - Validation Accuracy: 0.4373\n",
      "---------------\n",
      "Epoch 46/60\n",
      "---------------\n",
      "Epoch 46/60 - Train Loss: 46.8579\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.23202660628361166, 6: 0.062267657992565055, 7: 0.06820131541321132, 8: 0.5697707330880272}\n",
      "Validation Loss: 532.5115 - Validation Accuracy: 0.4393\n",
      "---------------\n",
      "Epoch 47/60\n",
      "---------------\n",
      "Epoch 47/60 - Train Loss: 46.5355\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.16777526181715255, 6: 0.05061481269659708, 7: 0.04804117815270231, 8: 0.6745683555052363}\n",
      "Validation Loss: 524.6131 - Validation Accuracy: 0.4466\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 48/60\n",
      "---------------\n",
      "Epoch 48/60 - Train Loss: 47.1602\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.2981177469572601, 6: 0.12460680583357163, 7: 0.06133828996282528, 8: 0.5445796773280498}\n",
      "Validation Loss: 527.7557 - Validation Accuracy: 0.4451\n",
      "---------------\n",
      "Epoch 49/60\n",
      "---------------\n",
      "Epoch 49/60 - Train Loss: 47.0100\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.3203368242287008, 6: 0.1543465827852445, 7: 0.03066914498141264, 8: 0.5289414095669402}\n",
      "Validation Loss: 532.4902 - Validation Accuracy: 0.4370\n",
      "---------------\n",
      "Epoch 50/60\n",
      "---------------\n",
      "Epoch 50/60 - Train Loss: 47.7085\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.15348146051514294, 6: 0.05061481269659708, 7: 0.02745210180154418, 8: 0.7282762524766487}\n",
      "Validation Loss: 534.1938 - Validation Accuracy: 0.4348\n",
      "---------------\n",
      "Epoch 51/60\n",
      "---------------\n",
      "Epoch 51/60 - Train Loss: 47.1035\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.14690065100481178, 6: 0.09300829282241922, 7: 0.03524449528167, 8: 0.6727993206906312}\n",
      "Validation Loss: 532.3962 - Validation Accuracy: 0.4408\n",
      "---------------\n",
      "Epoch 52/60\n",
      "---------------\n",
      "Epoch 52/60 - Train Loss: 47.6080\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.34892442683272007, 6: 0.08821847297683728, 7: 0.057191878753217046, 8: 0.5450042456835551}\n",
      "Validation Loss: 530.8385 - Validation Accuracy: 0.4356\n",
      "---------------\n",
      "Epoch 53/60\n",
      "---------------\n",
      "Epoch 53/60 - Train Loss: 47.9467\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.13013020096235495, 6: 0.013297111810122962, 7: 0.0, 8: 0.8059015001415227}\n",
      "Validation Loss: 537.4034 - Validation Accuracy: 0.4313\n",
      "---------------\n",
      "Epoch 54/60\n",
      "---------------\n",
      "Epoch 54/60 - Train Loss: 47.2072\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.19607981885083497, 6: 0.10923648841864456, 7: 0.03867600800686303, 8: 0.6139258420605718}\n",
      "Validation Loss: 530.4191 - Validation Accuracy: 0.4362\n",
      "---------------\n",
      "Epoch 55/60\n",
      "---------------\n",
      "Epoch 55/60 - Train Loss: 47.0635\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.04705632606849703, 6: 0.03002573634543895, 7: 0.0, 8: 0.9157939428247948}\n",
      "Validation Loss: 531.3803 - Validation Accuracy: 0.4373\n",
      "---------------\n",
      "Epoch 56/60\n",
      "---------------\n",
      "Epoch 56/60 - Train Loss: 45.8370\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.13055476931786017, 6: 0.0382470689162139, 7: 0.0, 8: 0.7780922728559299}\n",
      "Validation Loss: 534.9055 - Validation Accuracy: 0.4353\n",
      "---------------\n",
      "Epoch 57/60\n",
      "---------------\n",
      "Epoch 57/60 - Train Loss: 47.7810\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.22410133031418059, 6: 0.14476694309408064, 7: 0.08128395767800972, 8: 0.6104585338239457}\n",
      "Validation Loss: 536.6449 - Validation Accuracy: 0.4357\n",
      "---------------\n",
      "Epoch 58/60\n",
      "---------------\n",
      "Epoch 58/60 - Train Loss: 45.9193\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.00665742024965326, 5: 0.14215963770166998, 6: 0.003217043179868459, 7: 0.03617386331140978, 8: 0.7287715822247381}\n",
      "Validation Loss: 526.6634 - Validation Accuracy: 0.4423\n",
      "---------------\n",
      "Epoch 59/60\n",
      "---------------\n",
      "Epoch 59/60 - Train Loss: 43.8216\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.1728700820832154, 6: 0.051258221332570776, 7: 0.017157563625965114, 8: 0.7282054910840645}\n",
      "Validation Loss: 531.0104 - Validation Accuracy: 0.4390\n",
      "---------------\n",
      "Epoch 60/60\n",
      "---------------\n",
      "Epoch 60/60 - Train Loss: 45.8279\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.21320407585621284, 6: 0.09186445524735487, 7: 0.047826708607377755, 8: 0.5733088027172375}\n",
      "Validation Loss: 536.5193 - Validation Accuracy: 0.4392\n",
      "Loss on test set: 541.5801\n",
      "Accuracy on test set: 0.4392\n",
      "Incremental ARC Classifier added, current len: 10\n",
      "Incremental Task Layer added, current len: 10\n",
      "tsk: 9\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0', 'incremental_layers.1', 'incremental_layers.2', 'incremental_layers.3', 'incremental_layers.4', 'incremental_layers.5', 'incremental_layers.6', 'incremental_layers.7', 'incremental_layers.8', 'arc_classifiers.0.4', 'arc_classifiers.1.4', 'arc_classifiers.2.4', 'arc_classifiers.3.4', 'arc_classifiers.4.4', 'arc_classifiers.5.4', 'arc_classifiers.6.4', 'arc_classifiers.7.4', 'arc_classifiers.7.4']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "----Freezing incremental_layers.1.gate_generator.0.weight\n",
      "----Freezing incremental_layers.1.gate_generator.0.bias\n",
      "----Freezing incremental_layers.1.gate_generator.1.weight\n",
      "----Freezing incremental_layers.1.gate_generator.1.bias\n",
      "----Freezing incremental_layers.1.gate_generator.5.weight\n",
      "----Freezing incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "----Freezing incremental_layers.2.gate_generator.0.weight\n",
      "----Freezing incremental_layers.2.gate_generator.0.bias\n",
      "----Freezing incremental_layers.2.gate_generator.1.weight\n",
      "----Freezing incremental_layers.2.gate_generator.1.bias\n",
      "----Freezing incremental_layers.2.gate_generator.5.weight\n",
      "----Freezing incremental_layers.2.gate_generator.5.bias\n",
      "----Freezing incremental_layers.3.layer.0.weight\n",
      "----Freezing incremental_layers.3.layer.0.bias\n",
      "----Freezing incremental_layers.3.layer.1.weight\n",
      "----Freezing incremental_layers.3.layer.1.bias\n",
      "----Freezing incremental_layers.3.layer.3.weight\n",
      "----Freezing incremental_layers.3.layer.3.bias\n",
      "----Freezing incremental_layers.3.layer.4.weight\n",
      "----Freezing incremental_layers.3.layer.4.bias\n",
      "----Freezing incremental_layers.3.layer.6.weight\n",
      "----Freezing incremental_layers.3.layer.6.bias\n",
      "----Freezing incremental_layers.3.layer.7.weight\n",
      "----Freezing incremental_layers.3.layer.7.bias\n",
      "----Freezing incremental_layers.3.gate_generator.0.weight\n",
      "----Freezing incremental_layers.3.gate_generator.0.bias\n",
      "----Freezing incremental_layers.3.gate_generator.1.weight\n",
      "----Freezing incremental_layers.3.gate_generator.1.bias\n",
      "----Freezing incremental_layers.3.gate_generator.5.weight\n",
      "----Freezing incremental_layers.3.gate_generator.5.bias\n",
      "----Freezing incremental_layers.4.layer.0.weight\n",
      "----Freezing incremental_layers.4.layer.0.bias\n",
      "----Freezing incremental_layers.4.layer.1.weight\n",
      "----Freezing incremental_layers.4.layer.1.bias\n",
      "----Freezing incremental_layers.4.layer.3.weight\n",
      "----Freezing incremental_layers.4.layer.3.bias\n",
      "----Freezing incremental_layers.4.layer.4.weight\n",
      "----Freezing incremental_layers.4.layer.4.bias\n",
      "----Freezing incremental_layers.4.layer.6.weight\n",
      "----Freezing incremental_layers.4.layer.6.bias\n",
      "----Freezing incremental_layers.4.layer.7.weight\n",
      "----Freezing incremental_layers.4.layer.7.bias\n",
      "----Freezing incremental_layers.4.gate_generator.0.weight\n",
      "----Freezing incremental_layers.4.gate_generator.0.bias\n",
      "----Freezing incremental_layers.4.gate_generator.1.weight\n",
      "----Freezing incremental_layers.4.gate_generator.1.bias\n",
      "----Freezing incremental_layers.4.gate_generator.5.weight\n",
      "----Freezing incremental_layers.4.gate_generator.5.bias\n",
      "----Freezing incremental_layers.5.layer.0.weight\n",
      "----Freezing incremental_layers.5.layer.0.bias\n",
      "----Freezing incremental_layers.5.layer.1.weight\n",
      "----Freezing incremental_layers.5.layer.1.bias\n",
      "----Freezing incremental_layers.5.layer.3.weight\n",
      "----Freezing incremental_layers.5.layer.3.bias\n",
      "----Freezing incremental_layers.5.layer.4.weight\n",
      "----Freezing incremental_layers.5.layer.4.bias\n",
      "----Freezing incremental_layers.5.layer.6.weight\n",
      "----Freezing incremental_layers.5.layer.6.bias\n",
      "----Freezing incremental_layers.5.layer.7.weight\n",
      "----Freezing incremental_layers.5.layer.7.bias\n",
      "----Freezing incremental_layers.5.gate_generator.0.weight\n",
      "----Freezing incremental_layers.5.gate_generator.0.bias\n",
      "----Freezing incremental_layers.5.gate_generator.1.weight\n",
      "----Freezing incremental_layers.5.gate_generator.1.bias\n",
      "----Freezing incremental_layers.5.gate_generator.5.weight\n",
      "----Freezing incremental_layers.5.gate_generator.5.bias\n",
      "----Freezing incremental_layers.6.layer.0.weight\n",
      "----Freezing incremental_layers.6.layer.0.bias\n",
      "----Freezing incremental_layers.6.layer.1.weight\n",
      "----Freezing incremental_layers.6.layer.1.bias\n",
      "----Freezing incremental_layers.6.layer.3.weight\n",
      "----Freezing incremental_layers.6.layer.3.bias\n",
      "----Freezing incremental_layers.6.layer.4.weight\n",
      "----Freezing incremental_layers.6.layer.4.bias\n",
      "----Freezing incremental_layers.6.layer.6.weight\n",
      "----Freezing incremental_layers.6.layer.6.bias\n",
      "----Freezing incremental_layers.6.layer.7.weight\n",
      "----Freezing incremental_layers.6.layer.7.bias\n",
      "----Freezing incremental_layers.6.gate_generator.0.weight\n",
      "----Freezing incremental_layers.6.gate_generator.0.bias\n",
      "----Freezing incremental_layers.6.gate_generator.1.weight\n",
      "----Freezing incremental_layers.6.gate_generator.1.bias\n",
      "----Freezing incremental_layers.6.gate_generator.5.weight\n",
      "----Freezing incremental_layers.6.gate_generator.5.bias\n",
      "----Freezing incremental_layers.7.layer.0.weight\n",
      "----Freezing incremental_layers.7.layer.0.bias\n",
      "----Freezing incremental_layers.7.layer.1.weight\n",
      "----Freezing incremental_layers.7.layer.1.bias\n",
      "----Freezing incremental_layers.7.layer.3.weight\n",
      "----Freezing incremental_layers.7.layer.3.bias\n",
      "----Freezing incremental_layers.7.layer.4.weight\n",
      "----Freezing incremental_layers.7.layer.4.bias\n",
      "----Freezing incremental_layers.7.layer.6.weight\n",
      "----Freezing incremental_layers.7.layer.6.bias\n",
      "----Freezing incremental_layers.7.layer.7.weight\n",
      "----Freezing incremental_layers.7.layer.7.bias\n",
      "----Freezing incremental_layers.7.gate_generator.0.weight\n",
      "----Freezing incremental_layers.7.gate_generator.0.bias\n",
      "----Freezing incremental_layers.7.gate_generator.1.weight\n",
      "----Freezing incremental_layers.7.gate_generator.1.bias\n",
      "----Freezing incremental_layers.7.gate_generator.5.weight\n",
      "----Freezing incremental_layers.7.gate_generator.5.bias\n",
      "----Freezing incremental_layers.8.layer.0.weight\n",
      "----Freezing incremental_layers.8.layer.0.bias\n",
      "----Freezing incremental_layers.8.layer.1.weight\n",
      "----Freezing incremental_layers.8.layer.1.bias\n",
      "----Freezing incremental_layers.8.layer.3.weight\n",
      "----Freezing incremental_layers.8.layer.3.bias\n",
      "----Freezing incremental_layers.8.layer.4.weight\n",
      "----Freezing incremental_layers.8.layer.4.bias\n",
      "----Freezing incremental_layers.8.layer.6.weight\n",
      "----Freezing incremental_layers.8.layer.6.bias\n",
      "----Freezing incremental_layers.8.layer.7.weight\n",
      "----Freezing incremental_layers.8.layer.7.bias\n",
      "----Freezing incremental_layers.8.gate_generator.0.weight\n",
      "----Freezing incremental_layers.8.gate_generator.0.bias\n",
      "----Freezing incremental_layers.8.gate_generator.1.weight\n",
      "----Freezing incremental_layers.8.gate_generator.1.bias\n",
      "----Freezing incremental_layers.8.gate_generator.5.weight\n",
      "----Freezing incremental_layers.8.gate_generator.5.bias\n",
      "incremental_layers.9.layer.0.weight\n",
      "incremental_layers.9.layer.0.bias\n",
      "incremental_layers.9.layer.1.weight\n",
      "incremental_layers.9.layer.1.bias\n",
      "incremental_layers.9.layer.3.weight\n",
      "incremental_layers.9.layer.3.bias\n",
      "incremental_layers.9.layer.4.weight\n",
      "incremental_layers.9.layer.4.bias\n",
      "incremental_layers.9.layer.6.weight\n",
      "incremental_layers.9.layer.6.bias\n",
      "incremental_layers.9.layer.7.weight\n",
      "incremental_layers.9.layer.7.bias\n",
      "incremental_layers.9.gate_generator.0.weight\n",
      "incremental_layers.9.gate_generator.0.bias\n",
      "incremental_layers.9.gate_generator.1.weight\n",
      "incremental_layers.9.gate_generator.1.bias\n",
      "incremental_layers.9.gate_generator.5.weight\n",
      "incremental_layers.9.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "----Freezing arc_classifiers.2.4.weight\n",
      "arc_classifiers.3.2.weight\n",
      "arc_classifiers.3.2.bias\n",
      "----Freezing arc_classifiers.3.4.weight\n",
      "arc_classifiers.4.2.weight\n",
      "arc_classifiers.4.2.bias\n",
      "----Freezing arc_classifiers.4.4.weight\n",
      "arc_classifiers.5.2.weight\n",
      "arc_classifiers.5.2.bias\n",
      "----Freezing arc_classifiers.5.4.weight\n",
      "arc_classifiers.6.2.weight\n",
      "arc_classifiers.6.2.bias\n",
      "----Freezing arc_classifiers.6.4.weight\n",
      "arc_classifiers.7.2.weight\n",
      "arc_classifiers.7.2.bias\n",
      "----Freezing arc_classifiers.7.4.weight\n",
      "----Freezing arc_classifiers.7.4.weight\n",
      "arc_classifiers.8.2.weight\n",
      "arc_classifiers.8.2.bias\n",
      "arc_classifiers.8.4.weight\n",
      "arc_classifiers.9.2.weight\n",
      "arc_classifiers.9.2.bias\n",
      "arc_classifiers.9.4.weight\n",
      "alpha_cls: [214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      "   8.59992604   8.59992604   8.59992604   8.59992604   8.59992604]\n",
      "alpha_tsk: [ 4.29998151 42.99815108 42.99815108 42.99815108 42.99815108 42.99815108\n",
      " 42.99815108 42.99815108 42.99815108  1.71999704]\n",
      "num_prev_task_classes: 90\n",
      "alpha_cls: [214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      " 214.95378494 214.95378494 214.95378494 214.95378494 214.95378494\n",
      "   8.59992604   8.59992604   8.59992604   8.59992604   8.59992604]\n",
      "alpha_tsk: [ 25.07381025 250.72839937 250.72839937 250.72839937 250.72839937\n",
      " 250.72839937 250.72839937 250.72839937 250.72839937  10.02954998]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b7052cd258b146d9a5a5157e3b1faa7a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/2040 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/60\n",
      "---------------\n",
      "Epoch 1/60 - Train Loss: 86.6639\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.06127936597792245, 6: 0.0, 7: 0.0, 8: 0.09623549391452024, 9: 0.9388349514563107}\n",
      "Validation Loss: 563.3865 - Validation Accuracy: 0.4039\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/60\n",
      "---------------\n",
      "Epoch 2/60 - Train Loss: 72.0476\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0619162185111803, 6: 0.0, 7: 0.0, 8: 0.07401641664307954, 9: 0.9635228848821081}\n",
      "Validation Loss: 564.8558 - Validation Accuracy: 0.4088\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/60\n",
      "---------------\n",
      "Epoch 3/60 - Train Loss: 67.6500\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.07691763373903199, 6: 0.002287675150128682, 7: 0.0, 8: 0.10720350976507217, 9: 0.940499306518724}\n",
      "Validation Loss: 559.7246 - Validation Accuracy: 0.4166\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/60\n",
      "---------------\n",
      "Epoch 4/60 - Train Loss: 68.6784\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.010826493065383526, 6: 0.0, 7: 0.0, 8: 0.060430229266911974, 9: 0.9457697642163662}\n",
      "Validation Loss: 558.3576 - Validation Accuracy: 0.4149\n",
      "---------------\n",
      "Epoch 5/60\n",
      "---------------\n",
      "Epoch 5/60 - Train Loss: 66.8205\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.02979054627795075, 6: 0.001143837575064341, 7: 0.0, 8: 0.0747240305689216, 9: 0.9509015256588073}\n",
      "Validation Loss: 564.5172 - Validation Accuracy: 0.4140\n",
      "---------------\n",
      "Epoch 6/60\n",
      "---------------\n",
      "Epoch 6/60 - Train Loss: 64.9667\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.02349278233795641, 6: 0.0020732056048041177, 7: 0.0, 8: 0.05363713557882819, 9: 0.9517337031900138}\n",
      "Validation Loss: 566.0189 - Validation Accuracy: 0.4120\n",
      "---------------\n",
      "Epoch 7/60\n",
      "---------------\n",
      "Epoch 7/60 - Train Loss: 65.2299\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.02349278233795641, 6: 0.007792393480125822, 7: 0.0, 8: 0.10423153127653552, 9: 0.9123439667128987}\n",
      "Validation Loss: 564.0693 - Validation Accuracy: 0.4141\n",
      "---------------\n",
      "Epoch 8/60\n",
      "---------------\n",
      "Epoch 8/60 - Train Loss: 60.3874\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.030073591848287576, 6: 0.0009293680297397769, 7: 0.0, 8: 0.06609114067364846, 9: 0.9472954230235784}\n",
      "Validation Loss: 566.8799 - Validation Accuracy: 0.4164\n",
      "---------------\n",
      "Epoch 9/60\n",
      "---------------\n",
      "Epoch 9/60 - Train Loss: 61.8170\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.04181998301726578, 6: 0.00550471832999714, 7: 0.0, 8: 0.11873761675629776, 9: 0.8762135922330098}\n",
      "Validation Loss: 569.0418 - Validation Accuracy: 0.4173\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 10/60\n",
      "---------------\n",
      "Epoch 10/60 - Train Loss: 63.3325\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.01761958675346731, 6: 0.003217043179868459, 7: 0.0, 8: 0.08194169261251061, 9: 0.9149098474341193}\n",
      "Validation Loss: 562.8906 - Validation Accuracy: 0.4205\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 11/60\n",
      "---------------\n",
      "Epoch 11/60 - Train Loss: 61.4629\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.062199264081517124, 6: 0.007792393480125822, 7: 0.0, 8: 0.16827059156524202, 9: 0.8794729542302357}\n",
      "Validation Loss: 560.4881 - Validation Accuracy: 0.4196\n",
      "---------------\n",
      "Epoch 12/60\n",
      "---------------\n",
      "Epoch 12/60 - Train Loss: 61.2137\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.06878007359184829, 6: 0.003217043179868459, 7: 0.0, 8: 0.14562694593829606, 9: 0.8583217753120665}\n",
      "Validation Loss: 561.9097 - Validation Accuracy: 0.4174\n",
      "---------------\n",
      "Epoch 13/60\n",
      "---------------\n",
      "Epoch 13/60 - Train Loss: 61.0917\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.08505519388621569, 6: 0.007792393480125822, 7: 0.0, 8: 0.15857628078120578, 9: 0.8849514563106796}\n",
      "Validation Loss: 566.2342 - Validation Accuracy: 0.4163\n",
      "---------------\n",
      "Epoch 14/60\n",
      "---------------\n",
      "Epoch 14/60 - Train Loss: 59.9531\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0701245400509482, 6: 0.012367743780383186, 7: 0.0, 8: 0.17980469855646758, 9: 0.8237170596393897}\n",
      "Validation Loss: 566.8429 - Validation Accuracy: 0.4182\n",
      "---------------\n",
      "Epoch 15/60\n",
      "---------------\n",
      "Epoch 15/60 - Train Loss: 60.1722\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.08434757996037362, 6: 0.013297111810122962, 7: 0.0, 8: 0.2054910840645344, 9: 0.8723300970873786}\n",
      "Validation Loss: 566.6330 - Validation Accuracy: 0.4206\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 16/60\n",
      "---------------\n",
      "Epoch 16/60 - Train Loss: 59.4100\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0992782337956411, 6: 0.011223906205318844, 7: 0.0, 8: 0.2102320973676762, 9: 0.8283633841886269}\n",
      "Validation Loss: 564.6175 - Validation Accuracy: 0.4176\n",
      "---------------\n",
      "Epoch 17/60\n",
      "---------------\n",
      "Epoch 17/60 - Train Loss: 59.5745\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.05901500141522785, 6: 0.00550471832999714, 7: 0.0, 8: 0.193673931502972, 9: 0.8149791955617198}\n",
      "Validation Loss: 564.7753 - Validation Accuracy: 0.4180\n",
      "---------------\n",
      "Epoch 18/60\n",
      "---------------\n",
      "Epoch 18/60 - Train Loss: 58.8193\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.040475516558165864, 6: 0.015799256505576207, 7: 0.0, 8: 0.2862298330031135, 9: 0.6762135922330097}\n",
      "Validation Loss: 567.6083 - Validation Accuracy: 0.4163\n",
      "---------------\n",
      "Epoch 19/60\n",
      "---------------\n",
      "Epoch 19/60 - Train Loss: 60.8600\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.07621001981318992, 6: 0.015799256505576207, 7: 0.0, 8: 0.21773280498160205, 9: 0.781622746185853}\n",
      "Validation Loss: 562.4982 - Validation Accuracy: 0.4231\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 20/60\n",
      "---------------\n",
      "Epoch 20/60 - Train Loss: 58.8268\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.10246249646193037, 6: 0.014440949385187304, 7: 0.0, 8: 0.27278516841211436, 9: 0.6434812760055478}\n",
      "Validation Loss: 568.2800 - Validation Accuracy: 0.4213\n",
      "---------------\n",
      "Epoch 21/60\n",
      "---------------\n",
      "Epoch 21/60 - Train Loss: 58.8338\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.07621001981318992, 6: 0.014655418930511867, 7: 0.0, 8: 0.28587602604019247, 9: 0.8124826629680999}\n",
      "Validation Loss: 559.6975 - Validation Accuracy: 0.4275\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 22/60\n",
      "---------------\n",
      "Epoch 22/60 - Train Loss: 57.8821\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.06559581092555901, 6: 0.01694309408064055, 7: 0.0, 8: 0.1986272289838664, 9: 0.839251040221914}\n",
      "Validation Loss: 565.4646 - Validation Accuracy: 0.4245\n",
      "---------------\n",
      "Epoch 23/60\n",
      "---------------\n",
      "Epoch 23/60 - Train Loss: 58.6883\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.10585904330597226, 6: 0.019945667715184443, 7: 0.002287675150128682, 8: 0.26592131333144636, 9: 0.682246879334258}\n",
      "Validation Loss: 561.8233 - Validation Accuracy: 0.4208\n",
      "---------------\n",
      "Epoch 24/60\n",
      "---------------\n",
      "Epoch 24/60 - Train Loss: 56.5567\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.08731955844891028, 6: 0.014440949385187304, 7: 0.0, 8: 0.2348570619869799, 9: 0.7894590846047157}\n",
      "Validation Loss: 564.3887 - Validation Accuracy: 0.4214\n",
      "---------------\n",
      "Epoch 25/60\n",
      "---------------\n",
      "Epoch 25/60 - Train Loss: 57.3360\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.10409000849136711, 6: 0.02016013726050901, 7: 0.0, 8: 0.29875459949051797, 9: 0.6685852981969487}\n",
      "Validation Loss: 566.7343 - Validation Accuracy: 0.4177\n",
      "---------------\n",
      "Epoch 26/60\n",
      "---------------\n",
      "Epoch 26/60 - Train Loss: 57.2507\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.10359467874327767, 6: 0.017872462110380325, 7: 0.0, 8: 0.3796348712142655, 9: 0.7296809986130375}\n",
      "Validation Loss: 565.9084 - Validation Accuracy: 0.4178\n",
      "---------------\n",
      "Epoch 27/60\n",
      "---------------\n",
      "Epoch 27/60 - Train Loss: 58.7758\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.1330314180583074, 6: 0.02016013726050901, 7: 0.002287675150128682, 8: 0.5347438437588452, 9: 0.4426490984743412}\n",
      "Validation Loss: 561.8281 - Validation Accuracy: 0.4208\n",
      "---------------\n",
      "Epoch 28/60\n",
      "---------------\n",
      "Epoch 28/60 - Train Loss: 57.6879\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.08753184262666289, 6: 0.013511581355447527, 7: 0.0, 8: 0.37984715539201813, 9: 0.6808599167822469}\n",
      "Validation Loss: 569.8967 - Validation Accuracy: 0.4164\n",
      "---------------\n",
      "Epoch 29/60\n",
      "---------------\n",
      "Epoch 29/60 - Train Loss: 57.2090\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.07168129068780074, 6: 0.010080068630254504, 7: 0.0, 8: 0.23181432210585903, 9: 0.6945214979195562}\n",
      "Validation Loss: 561.3952 - Validation Accuracy: 0.4292\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 30/60\n",
      "---------------\n",
      "Epoch 30/60 - Train Loss: 57.1991\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.08866402490801019, 6: 0.016728624535315983, 7: 0.0, 8: 0.2521936031701104, 9: 0.6855062413314841}\n",
      "Validation Loss: 563.2138 - Validation Accuracy: 0.4211\n",
      "---------------\n",
      "Epoch 31/60\n",
      "---------------\n",
      "Epoch 31/60 - Train Loss: 56.1846\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.08279082932352108, 6: 0.025664855590506148, 7: 0.0, 8: 0.2865128785734503, 9: 0.6918169209431345}\n",
      "Validation Loss: 565.4091 - Validation Accuracy: 0.4194\n",
      "---------------\n",
      "Epoch 32/60\n",
      "---------------\n",
      "Epoch 32/60 - Train Loss: 55.6561\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.1330314180583074, 6: 0.024521018015441806, 7: 0.0, 8: 0.29245683555052365, 9: 0.5714285714285714}\n",
      "Validation Loss: 563.4692 - Validation Accuracy: 0.4176\n",
      "---------------\n",
      "Epoch 33/60\n",
      "---------------\n",
      "Epoch 33/60 - Train Loss: 58.4469\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.12078969714123974, 6: 0.016514154989991422, 7: 0.0, 8: 0.392513444664591, 9: 0.532246879334258}\n",
      "Validation Loss: 563.4033 - Validation Accuracy: 0.4199\n",
      "---------------\n",
      "Epoch 34/60\n",
      "---------------\n",
      "Epoch 34/60 - Train Loss: 56.1703\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.09680158505519389, 6: 0.015799256505576207, 7: 0.0, 8: 0.3193461647325219, 9: 0.6825936199722608}\n",
      "Validation Loss: 566.9369 - Validation Accuracy: 0.4206\n",
      "---------------\n",
      "Epoch 35/60\n",
      "---------------\n",
      "Epoch 35/60 - Train Loss: 55.3733\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.09092838947070478, 6: 0.02130397483557335, 7: 0.002287675150128682, 8: 0.27066232663458817, 9: 0.7148404993065187}\n",
      "Validation Loss: 565.0817 - Validation Accuracy: 0.4215\n",
      "---------------\n",
      "Epoch 36/60\n",
      "---------------\n",
      "Epoch 36/60 - Train Loss: 55.9070\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.13982451174639116, 6: 0.024521018015441806, 7: 0.002287675150128682, 8: 0.3879139541466176, 9: 0.5181692094313454}\n",
      "Validation Loss: 560.8037 - Validation Accuracy: 0.4224\n",
      "---------------\n",
      "Epoch 37/60\n",
      "---------------\n",
      "Epoch 37/60 - Train Loss: 57.2250\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.004438280166435506, 5: 0.10769883951316161, 6: 0.017872462110380325, 7: 0.0, 8: 0.44841494480611377, 9: 0.510124826629681}\n",
      "Validation Loss: 567.3400 - Validation Accuracy: 0.4168\n",
      "---------------\n",
      "Epoch 38/60\n",
      "---------------\n",
      "Epoch 38/60 - Train Loss: 55.2786\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.002219140083217753, 5: 0.11038777243136146, 6: 0.019016299685444667, 7: 0.002287675150128682, 8: 0.4241437871497311, 9: 0.6242024965325936}\n",
      "Validation Loss: 562.4610 - Validation Accuracy: 0.4234\n",
      "---------------\n",
      "Epoch 39/60\n",
      "---------------\n",
      "Epoch 39/60 - Train Loss: 55.3319\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.11265213699405605, 6: 0.022447812410637688, 7: 0.0, 8: 0.3508349844324936, 9: 0.564008321775312}\n",
      "Validation Loss: 562.6562 - Validation Accuracy: 0.4195\n",
      "---------------\n",
      "Epoch 40/60\n",
      "---------------\n",
      "Epoch 40/60 - Train Loss: 55.9757\n",
      "Per Task Accuracy: {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.09248514010755732, 6: 0.011938804689734057, 7: 0.0, 8: 0.3342060571752052, 9: 0.7033980582524272}\n",
      "Validation Loss: 566.1393 - Validation Accuracy: 0.4168\n",
      "---------------\n",
      "Epoch 41/60\n",
      "---------------\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[58], line 96\u001b[0m\n\u001b[1;32m     94\u001b[0m criterion_label\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m     95\u001b[0m model\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m---> 96\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[43mtrain_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m     97\u001b[0m \u001b[43m            \u001b[49m\u001b[43mtask_class_orders\u001b[49m\u001b[43m,\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mcombined_train_loader\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mval\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mcombined_test_loader\u001b[49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m     98\u001b[0m \u001b[43m            \u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43mweight_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43mscheduler\u001b[49m\u001b[43m,\u001b[49m\u001b[43mlabel_mapping\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlabel_mapping\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m     99\u001b[0m \u001b[43m            \u001b[49m\u001b[43mnum_epochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_epochs\u001b[49m\u001b[43m,\u001b[49m\u001b[43muse_weighted_loss\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m    102\u001b[0m \u001b[38;5;66;03m# 在完整数据集上评估模型\u001b[39;00m\n\u001b[1;32m    103\u001b[0m 
loss, accuracy \u001b[38;5;241m=\u001b[39m evaluate_model(device,label_mapping,model, {\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mval\u001b[39m\u001b[38;5;124m'\u001b[39m: combined_test_loader}, criterion)\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/train_eval.py:184\u001b[0m, in \u001b[0;36mtrain_model\u001b[0;34m(device, model, task_class_orders, dataloaders, criterion, criterion_label, weight_label, optimizer, scheduler, label_mapping, num_epochs, stop_accuracy, use_weighted_loss, weighted_loss_momentum)\u001b[0m\n\u001b[1;32m    181\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEpoch \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mepoch\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_epochs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m    182\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m-\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m15\u001b[39m)\n\u001b[0;32m--> 184\u001b[0m train_loss, _, _, _ \u001b[38;5;241m=\u001b[39m \u001b[43mtrain_one_epoch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43mdataloaders\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43mweight_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpbar\u001b[49m\u001b[43m,\u001b[49m\u001b[43mlabel_mapping_tensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43mtask_label_mapping_tensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclass_weights\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    185\u001b[0m 
\u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEpoch \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mepoch\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_epochs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m - Train Loss: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtrain_loss\u001b[38;5;132;01m:\u001b[39;00m\u001b[38;5;124m.4f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m    186\u001b[0m val_loss, val_accuracy, _,_ \u001b[38;5;241m=\u001b[39m validate_one_epoch(model, dataloaders[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mval\u001b[39m\u001b[38;5;124m'\u001b[39m], criterion,label_mapping_tensor, task_label_mapping_tensor,device)\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/train_eval.py:29\u001b[0m, in \u001b[0;36mtrain_one_epoch\u001b[0;34m(model, dataloader, criterion, criterion_label, weight_label, optimizer, device, pbar, label_mapping_tensor, task_label_mapping_tensor, class_weights)\u001b[0m\n\u001b[1;32m     26\u001b[0m inputs, rwa_labels \u001b[38;5;241m=\u001b[39m inputs\u001b[38;5;241m.\u001b[39mto(device), rwa_labels\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[1;32m     27\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mzero_grad()\n\u001b[0;32m---> 29\u001b[0m _, outputs,tsk_outputs \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     30\u001b[0m \u001b[38;5;66;03m#print(f\"tsk_outputs shape: {tsk_outputs.shape}\")\u001b[39;00m\n\u001b[1;32m     31\u001b[0m labels \u001b[38;5;241m=\u001b[39m label_mapping_tensor[rwa_labels]\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/networkv1.py:164\u001b[0m, in \u001b[0;36mArcMultiTaskModel.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m    162\u001b[0m \u001b[38;5;66;03m#print(x.shape)\u001b[39;00m\n\u001b[1;32m    163\u001b[0m features\u001b[38;5;241m.\u001b[39mappend(x)\n\u001b[0;32m--> 164\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43marc_classifiers\u001b[49m\u001b[43m[\u001b[49m\u001b[43midx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    166\u001b[0m arc_predictions\u001b[38;5;241m.\u001b[39mappend(output)\n\u001b[1;32m    167\u001b[0m gate_weights\u001b[38;5;241m.\u001b[39mappend(gate_weight\u001b[38;5;241m.\u001b[39msqueeze())\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/container.py:250\u001b[0m, in \u001b[0;36mSequential.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m    248\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m):\n\u001b[1;32m    249\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m module \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m:\n\u001b[0;32m--> 250\u001b[0m         \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m    251\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28minput\u001b[39m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/networkv1.py:29\u001b[0m, in \u001b[0;36mAddWeightProduct.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m     27\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28minput\u001b[39m\u001b[38;5;241m.\u001b[39mview(\u001b[38;5;28minput\u001b[39m\u001b[38;5;241m.\u001b[39msize(\u001b[38;5;241m0\u001b[39m), \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m)\n\u001b[1;32m     28\u001b[0m cosine \u001b[38;5;241m=\u001b[39m nn\u001b[38;5;241m.\u001b[39mfunctional\u001b[38;5;241m.\u001b[39mlinear(nn\u001b[38;5;241m.\u001b[39mfunctional\u001b[38;5;241m.\u001b[39mnormalize(\u001b[38;5;28minput\u001b[39m), nn\u001b[38;5;241m.\u001b[39mfunctional\u001b[38;5;241m.\u001b[39mnormalize(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mweight))\n\u001b[0;32m---> 29\u001b[0m sine \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39msqrt(\u001b[43m(\u001b[49m\u001b[38;5;241;43m1.0\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpow\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcosine\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m2\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mclamp\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m)\n\u001b[1;32m     30\u001b[0m phi \u001b[38;5;241m=\u001b[39m cosine \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcos_m \u001b[38;5;241m-\u001b[39m sine \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msin_m\n\u001b[1;32m     31\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39measy_margin:\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "init_freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'avgpool',\n",
    "    #'fc',\n",
    "    ]\n",
    "for tsk in range(1,10):\n",
    "\n",
    "    model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[tsk], s=s, m=m)\n",
    "    model.add_incremental_task_layer() \n",
    "    #print('tsk:',tsk)\n",
    "    '''freeze_prefix = init_freeze_prefixes\n",
    "    if tsk == 0:\n",
    "        freeze_prefix.append( 'incremental_layers.' + str(tsk))\n",
    "    else:\n",
    "        freeze_prefix.append( 'incremental_layers.' + str(tsk)+'.layer' )\n",
    "\n",
    "    freeze_prefix.append('arc_classifiers.' + str(tsk))'''\n",
    "\n",
    "    #-----------------------------------------------\n",
    "    init_freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    #'avgpool',\n",
    "    #'fc',\n",
    "    ]\n",
    "    print('tsk:',tsk)\n",
    "    #print('tsk:',tsk)\n",
    "    freeze_prefix = init_freeze_prefixes\n",
    "\n",
    "    if tsk == 1:\n",
    "         freeze_prefix.append( 'incremental_layers.' + str(tsk-1) )\n",
    "    else:\n",
    "        for layer in range(0,tsk-1):\n",
    "            freeze_prefix.append( 'incremental_layers.' + str(layer) )\n",
    "        freeze_prefix.append( 'incremental_layers.' + str(tsk-1))\n",
    "    \n",
    "    if tsk> 1:\n",
    "        for layer in range(0,tsk-1):\n",
    "            freeze_prefix.append('arc_classifiers.' + str(layer)+'.4')\n",
    "        freeze_prefix.append('arc_classifiers.' + str(layer)+'.4')\n",
    "    #-----------------------------------------------\n",
    "    print('freeze_prefix:',freeze_prefix)\n",
    "    freeze_layers(model, freeze_prefix)\n",
    "\n",
    "    combined_train_set = []\n",
    "    for i in range(tsk):\n",
    "        #print(i)\n",
    "        # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\n",
    "        # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\n",
    "        combined_train_set.append(exampler_train_sets[i])\n",
    "    combined_train_set.append(origin_train_set[tsk])\n",
    "    combined_train_set=ConcatDataset(combined_train_set)\n",
    "    # 创建一个新的DataLoader来加载合并后的数据集\n",
    "    combined_train_loader = DataLoader(MyDataset(combined_train_set,transform=transform_train), \n",
    "                                    batch_size=batch_size, shuffle=True, num_workers=4)\n",
    "\n",
    "\n",
    "    # 使用ConcatDataset合并这两个数据集\n",
    "    combined_test_set = []\n",
    "    for i in range(tsk+1):\n",
    "        # 这里我们简单地将整个 combined_test_set 作为一个集合添加到列表中\n",
    "        # 如果你需要根据索引来选择特定的元素，你可以修改这里的代码\n",
    "        combined_test_set.append(origin_test_set[i])\n",
    "    combined_test_set=ConcatDataset(combined_test_set)\n",
    "    # 创建一个新的DataLoader来加载合并后的数据集\n",
    "    combined_test_loader = DataLoader(MyDataset(combined_test_set,transform=transform_test), \n",
    "                                    batch_size=batch_size, shuffle=False, num_workers=4)\n",
    "    \n",
    "\n",
    "    num_epochs=60\n",
    "    weight_decay=0.01\n",
    "    max_lr=0.01\n",
    "    #weight_label=1.5+((tsk-1)*0.16)\n",
    "    weight_label=0.5\n",
    "    # 创建调度器和优化器\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=max_lr,weight_decay=weight_decay)\n",
    "    scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs, \n",
    "                                                    steps_per_epoch=len(combined_train_loader),three_phase=False)\n",
    "    class_stats = get_class_stats(combined_train_loader)\n",
    "    # 计算补齐后的 alpha\n",
    "    alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "    alpha_cls,alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "    print(\"alpha_cls:\", alpha_cls)\n",
    "    print(\"alpha_tsk:\", alpha_tsk)\n",
    "    alpha_cls,alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk, \n",
    "                    alpha_cls=alpha_cls,alpha_tsk=alpha_tsk,\n",
    "                    w_cls=1, w_tsk=1)\n",
    "    \n",
    "    #alpha_tsk=alpha_cls.mean()/alpha_tsk.mean()*alpha_tsk\n",
    "    alpha_tsk=alpha_cls.mean()/alpha_tsk.mean()*alpha_tsk\n",
    "    print(\"alpha_cls:\", alpha_cls)\n",
    "    print(\"alpha_tsk:\", alpha_tsk)\n",
    "    criterion = MultiClassFocalLossWithAlpha(device,alpha_cls,gamma=0,reduction='mean') \n",
    "    #criterion_label = MultiClassFocalLossWithAlpha(device,alpha_tsk,gamma=0,reduction='mean') \n",
    "    criterion_label=None\n",
    "    model.to(device)\n",
    "    model=train_model(device, model, \n",
    "                task_class_orders,{'train': combined_train_loader, 'val': combined_test_loader}, \n",
    "                criterion, criterion_label,weight_label,optimizer,scheduler,label_mapping=label_mapping, \n",
    "                num_epochs=num_epochs,use_weighted_loss=False)\n",
    "\n",
    "\n",
    "    # 在完整数据集上评估模型\n",
    "    loss, accuracy = evaluate_model(device,label_mapping,model, {'val': combined_test_loader}, criterion)\n",
    "    print(f'Loss on test set: {loss:.4f}')\n",
    "    print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "    # 保存模型\n",
    "    # 格式化准确率，保留三位小数，并将其转换为字符串\n",
    "    accuracy_str = f\"{accuracy:.3f}\"\n",
    "    current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "    # 使用准确率值作为文件名的一部分\n",
    "    file_name = f\"Loop_branch_{str(tsk)}_{accuracy_str}_{current_time}.pth\"\n",
    "    # 确保保存路径存在，如果不存在则创建\n",
    "    task_save_pth = os.path.join(model_save_pth, str(tsk))\n",
    "    if not os.path.exists(task_save_pth):\n",
    "        os.makedirs(task_save_pth, exist_ok=True)\n",
    "    # 保存模型到指定路径\n",
    "    torch.save(model.state_dict(), os.path.join(task_save_pth, file_name))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loss on test set: 1.5158\n",
      "Accuracy on test set: 0.6882\n",
      "Loss on test set: 0.9232\n",
      "Accuracy on test set: 0.7220\n"
     ]
    }
   ],
   "source": [
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[0]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "loss, accuracy = evaluate_model(device,label_mapping,model, {'val': test_loaders[1]})\n",
    "print(f'Loss on test set: {loss:.4f}')\n",
    "print(f'Accuracy on test set: {accuracy:.4f}')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "dl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
