{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import ConcatDataset,DataLoader\n",
    "from torch import nn, optim\n",
    "import torch\n",
    "import datetime\n",
    "from tqdm.notebook import tqdm\n",
    "import os\n",
    "#from cascade_lib import *\n",
    "from cascade_libary.networkv3 import create_ArcMultiTaskModel\n",
    "from cascade_libary.dataset import create_exampler_datasets_and_loaders, create_task_datasets_and_loaders,MyDataset,transform_test,transform_train\n",
    "from cascade_libary.train_evalv1 import train_model, evaluate_model\n",
    "from cascade_libary.criterian import get_class_stats,sort_and_complete_alpha_by_label_mapping,MultiClassFocalLossWithAlpha\n",
    "from cascade_libary.utils import freeze_layers\n",
    "from torchvision import datasets as torchvision_datasets\n",
    "import numpy as np\n",
    "import itertools\n",
    "from kornia.losses import FocalLoss\n",
    "import math\n",
    "import torch.nn.functional as F\n",
    "import random\n",
    "from copy import deepcopy\n",
    "#from kornia.losses import FocalLoss\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "init_freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    #'avgpool',\n",
    "    #'fc',\n",
    "    ]\n",
    "    #freeze_prefix.append('arc_classifiers.' + str(tsk))\n",
    "\n",
    "    0.16 \n",
    "\n",
    "    46.97\n",
    "\n",
    "\n",
    "    0.25\n",
    "\n",
    "    0.472\n",
    "        gamma 1,0 \n",
    "        0.422\n",
    "\n",
    "\n",
    "init_freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'avgpool',\n",
    "    #'fc',\n",
    "    ]\n",
    "weight_label=0.5+((tsk-1)*0.25)\n",
    "\n",
    "不冻arc cls (do not freeze the arc classifiers)\n",
    "\n",
    "gamma 0, 0\n",
    "\n",
    "0.485"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def reweight_alphas(task_class_orders, tsk,alpha_cls, alpha_tsk,w_cls, w_tsk):\n",
    "    \"\"\"\n",
    "    Reweight the per-class (alpha_cls) and per-task (alpha_tsk) focal-loss alphas.\n",
    "    \n",
    "    Parameters:\n",
    "    task_class_orders -- list of class-order lists, one per task.\n",
    "    tsk -- index of the current task; every task in task_class_orders[:tsk]\n",
    "           is treated as a previous task.\n",
    "    alpha_cls -- per-class alpha weights (mutated in place and also returned).\n",
    "    alpha_tsk -- per-task alpha weights (mutated in place and also returned).\n",
    "    w_cls -- factor applied to the alphas of all previous-task classes.\n",
    "    w_tsk -- factor applied to the alphas of all previous tasks.\n",
    "    \n",
    "    Returns:\n",
    "    The updated alpha_cls and alpha_tsk.\n",
    "    \"\"\"\n",
    "    # Total number of classes belonging to tasks before task `tsk`.\n",
    "\n",
    "    num_prev_task_classes = sum(len(task) for task in task_class_orders[:tsk])\n",
    "    print(\"num_prev_task_classes:\", num_prev_task_classes)\n",
    "    # Rescale the alphas of every old-task class.\n",
    "    for i in range(num_prev_task_classes):\n",
    "        alpha_cls[i] *= w_cls\n",
    "    \n",
    "    # Rescale the task-level alphas of every old task.\n",
    "    for i in range(tsk):\n",
    "        alpha_tsk[i] *= w_tsk\n",
    "    \n",
    "    return alpha_cls, alpha_tsk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Set global random seeds for reproducibility\n",
    "def set_seed(seed_value=42):\n",
    "    \"\"\"Seed Python's and NumPy's random generators with seed_value.\"\"\"\n",
    "    random.seed(seed_value)  # Python built-in random module\n",
    "    np.random.seed(seed_value)  # NumPy's legacy global generator\n",
    "    # NOTE: torch/CUDA seeding below is intentionally disabled (dead string literal);\n",
    "    # torch results are therefore NOT deterministic across runs.\n",
    "    '''torch.manual_seed(seed_value)  # PyTorch的随机数生成器\n",
    "    # 如果使用CUDA\n",
    "    torch.cuda.manual_seed_all(seed_value)  # 为所有CUDA设备设置随机种子\n",
    "    torch.backends.cudnn.deterministic = True  # 设置确定性卷积\n",
    "    torch.backends.cudnn.benchmark = False  # 为了确保确定性，关闭 cudnn 基准测试'''\n",
    "# Apply the seed\n",
    "set_seed(42)\n",
    "\n",
    "gpu=1  # CUDA device index to use\n",
    "if torch.cuda.is_available():\n",
    "        torch.cuda.set_device(gpu)\n",
    "        device = 'cuda'\n",
    "else:\n",
    "        print('WARNING: [CUDA unavailable] Using CPU instead!')\n",
    "        device = 'cpu'\n",
    "#device='cpu'\n",
    "# Directory used elsewhere for saving checkpoints (absolute local path)\n",
    "model_save_pth='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade/loop'\n",
    "\n",
    "s=12.5  # passed as `s` to add_arc_task_classifier — presumably the ArcFace scale; TODO confirm in networkv3\n",
    "m=0.0   # passed as `m` to add_arc_task_classifier — presumably the ArcFace margin; TODO confirm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 初始化"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Load the raw CIFAR-100 datasets once, outside any function\n",
    "root_dir = '../data'\n",
    "train_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=True, download=True)\n",
    "test_dataset = torchvision_datasets.CIFAR100(root=root_dir, train=False, download=True)\n",
    "\n",
    "# Shuffle the class order so the task splits get a random class assignment\n",
    "class_order = list(range(100))\n",
    "random.shuffle(class_order)\n",
    "label_mapping = {original_label: new_label for new_label, original_label in enumerate(class_order)}\n",
    "# Classes per task: a 50-class base task followed by ten 5-class increments\n",
    "num_classes_per_task = [50, 5, 5, 5, 5, 5,5,5,5,5,5]\n",
    "\n",
    "batch_size = 128\n",
    "# Build the per-task datasets and data loaders;\n",
    "# task_class_orders captures the randomized class assignment per task\n",
    "origin_train_set, origin_test_set, train_loaders, test_loaders,task_class_orders = create_task_datasets_and_loaders(train_dataset, test_dataset, num_classes_per_task,class_order, batch_size=batch_size, transform_train=transform_train, transform_test=transform_test)\n",
    "\n",
    "# Exemplar (rehearsal) sets built from origin_train_set/origin_test_set,\n",
    "# keeping n_samples_per_class images per class with the same transforms\n",
    "n_samples_per_class = 20\n",
    "exampler_train_sets, exampler_test_sets, exampler_train_loaders, exampler_test_test_loaders = create_exampler_datasets_and_loaders(origin_train_set, origin_test_set, n_samples_per_class, batch_size, transform_train, transform_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "设置标签转换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Determine the range of original labels (i.e. the largest original label value)\n",
    "max_label = max(label_mapping.keys())\n",
    "\n",
    "# Allocate a zero array long enough to index by any original label\n",
    "label_mapping_array = np.zeros(max_label + 1, dtype=int)\n",
    "\n",
    "# Fill the lookup array from the label_mapping dict (original -> new label)\n",
    "for original_label, new_label in label_mapping.items():\n",
    "    label_mapping_array[original_label] = new_label\n",
    "\n",
    "# Convert the lookup array to a PyTorch tensor on the training device\n",
    "label_mapping_tensor = torch.from_numpy(label_mapping_array).to(device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the multi-task model on a resnet34 backbone with 512-dim features\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 1\n",
      "Incremental Task Layer added, current len: 1\n",
      "模型已从 /home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_0_0.754_20241224_084407.pth 成功加载。\n"
     ]
    }
   ],
   "source": [
    "tsk=1  # current incremental task index (task 0 is the base task)\n",
    "# Re-create the model and attach the base task's classifier/layer,\n",
    "# then load the pretrained base-task checkpoint\n",
    "model = create_ArcMultiTaskModel('resnet34',512,None)\n",
    "model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[0], s=s, m=m)\n",
    "model.add_incremental_task_layer(isInitial=True) \n",
    "\n",
    "\n",
    "full_path='/home/ubuntu/workspace/cascade_incremental/temp/models/cascade/cascade_tsk_0_0.754_20241224_084407.pth'\n",
    "# Load the checkpoint if the file exists (weights_only=True avoids unpickling arbitrary objects)\n",
    "if os.path.isfile(full_path):\n",
    "    model.load_state_dict(torch.load(full_path,weights_only=True))\n",
    "    print(f\"模型已从 {full_path} 成功加载。\")\n",
    "else:\n",
    "    print(f\"文件 {full_path} 不存在。\")\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Incremental ARC Classifier added, current len: 2\n",
      "Incremental Task Layer added, current len: 2\n",
      "tsk: 1\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "incremental_layers.1.layer.0.weight\n",
      "incremental_layers.1.layer.0.bias\n",
      "incremental_layers.1.layer.1.weight\n",
      "incremental_layers.1.layer.1.bias\n",
      "incremental_layers.1.layer.3.weight\n",
      "incremental_layers.1.layer.3.bias\n",
      "incremental_layers.1.layer.4.weight\n",
      "incremental_layers.1.layer.4.bias\n",
      "incremental_layers.1.layer.6.weight\n",
      "incremental_layers.1.layer.6.bias\n",
      "incremental_layers.1.layer.7.weight\n",
      "incremental_layers.1.layer.7.bias\n",
      "incremental_layers.1.gate_generator.0.weight\n",
      "incremental_layers.1.gate_generator.0.bias\n",
      "incremental_layers.1.gate_generator.1.weight\n",
      "incremental_layers.1.gate_generator.1.bias\n",
      "incremental_layers.1.gate_generator.5.weight\n",
      "incremental_layers.1.gate_generator.5.bias\n",
      "arc_classifiers.0.2.weight\n",
      "arc_classifiers.0.2.bias\n",
      "arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "arc_classifiers.1.4.weight\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "alpha_cls: [174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      "   6.999951     6.999951     6.999951     6.999951     6.999951  ]\n",
      "alpha_tsk: [3.49998775 1.39999804]\n",
      "num_prev_task_classes: 50\n",
      "alpha_cls: [174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      " 174.96938036 174.96938036 174.96938036 174.96938036 174.96938036\n",
      "   6.999951     6.999951     6.999951     6.999951     6.999951  ]\n",
      "alpha_tsk: [228.14190917  91.2569553 ]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e6434fb846d64234b64e5329476eb66e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/840 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 58.9993\n",
      "Per Task Accuracy: {0: 0.9727591387080621, 1: 0.196149910463034}\n",
      "Validation Loss: 659.1750 - Validation Accuracy: 0.1949\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 22.7202\n",
      "Per Task Accuracy: {0: 0.9953930896344517, 1: 0.06408288564850345}\n",
      "Validation Loss: 202.9943 - Validation Accuracy: 0.6893\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 16.8898\n",
      "Per Task Accuracy: {0: 0.9981972959439158, 1: 0.02999488360194423}\n",
      "Validation Loss: 202.7012 - Validation Accuracy: 0.7013\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 15.6367\n",
      "Per Task Accuracy: {0: 0.9977966950425639, 1: 0.030058838577641342}\n",
      "Validation Loss: 201.9302 - Validation Accuracy: 0.7078\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 12.2142\n",
      "Per Task Accuracy: {0: 0.9977966950425639, 1: 0.030186748529035558}\n",
      "Validation Loss: 203.5688 - Validation Accuracy: 0.7125\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 12.5201\n",
      "Per Task Accuracy: {0: 0.9981972959439158, 1: 0.03012279355333845}\n",
      "Validation Loss: 202.0668 - Validation Accuracy: 0.7082\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 11.4970\n",
      "Per Task Accuracy: {0: 0.99899849774662, 1: 0.022000511639805577}\n",
      "Validation Loss: 205.0712 - Validation Accuracy: 0.7111\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 10.2470\n",
      "Per Task Accuracy: {0: 0.9985978968452679, 1: 0.022000511639805577}\n",
      "Validation Loss: 213.1884 - Validation Accuracy: 0.7089\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n",
      "Epoch 9/30 - Train Loss: 9.8246\n",
      "Per Task Accuracy: {0: 0.9979969954932398, 1: 0.03178562292146329}\n",
      "Validation Loss: 214.7191 - Validation Accuracy: 0.7049\n",
      "---------------\n",
      "Epoch 10/30\n",
      "---------------\n",
      "Epoch 10/30 - Train Loss: 8.1109\n",
      "Per Task Accuracy: {0: 0.9977966950425639, 1: 0.03587874136607828}\n",
      "Validation Loss: 220.6247 - Validation Accuracy: 0.7045\n",
      "---------------\n",
      "Epoch 11/30\n",
      "---------------\n",
      "Epoch 11/30 - Train Loss: 8.9293\n",
      "Per Task Accuracy: {0: 0.9975963945918879, 1: 0.03587874136607828}\n",
      "Validation Loss: 216.9678 - Validation Accuracy: 0.7089\n",
      "---------------\n",
      "Epoch 12/30\n",
      "---------------\n",
      "Epoch 12/30 - Train Loss: 7.6933\n",
      "Per Task Accuracy: {0: 0.9967951927891838, 1: 0.04796623177283193}\n",
      "Validation Loss: 217.5309 - Validation Accuracy: 0.7065\n",
      "---------------\n",
      "Epoch 13/30\n",
      "---------------\n",
      "Epoch 13/30 - Train Loss: 7.4711\n",
      "Per Task Accuracy: {0: 0.9979969954932398, 1: 0.038053210539779996}\n",
      "Validation Loss: 217.1909 - Validation Accuracy: 0.7073\n",
      "---------------\n",
      "Epoch 14/30\n",
      "---------------\n",
      "Epoch 14/30 - Train Loss: 8.0145\n",
      "Per Task Accuracy: {0: 0.9983975963945919, 1: 0.02999488360194423}\n",
      "Validation Loss: 219.5801 - Validation Accuracy: 0.7053\n",
      "---------------\n",
      "Epoch 15/30\n",
      "---------------\n",
      "Epoch 15/30 - Train Loss: 6.8473\n",
      "Per Task Accuracy: {0: 0.9971957936905358, 1: 0.05007674597083653}\n",
      "Validation Loss: 225.5482 - Validation Accuracy: 0.7035\n",
      "---------------\n",
      "Epoch 16/30\n",
      "---------------\n",
      "Epoch 16/30 - Train Loss: 6.6245\n",
      "Per Task Accuracy: {0: 0.9979969954932398, 1: 0.04022767971348171}\n",
      "Validation Loss: 229.8462 - Validation Accuracy: 0.6991\n",
      "---------------\n",
      "Epoch 17/30\n",
      "---------------\n",
      "Epoch 17/30 - Train Loss: 5.9741\n",
      "Per Task Accuracy: {0: 0.9977966950425639, 1: 0.042146328984394986}\n",
      "Validation Loss: 231.8672 - Validation Accuracy: 0.7005\n",
      "---------------\n",
      "Epoch 18/30\n",
      "---------------\n",
      "Epoch 18/30 - Train Loss: 6.7332\n",
      "Per Task Accuracy: {0: 0.9975963945918879, 1: 0.04611153747761576}\n",
      "Validation Loss: 225.9677 - Validation Accuracy: 0.7020\n",
      "---------------\n",
      "Epoch 19/30\n",
      "---------------\n",
      "Epoch 19/30 - Train Loss: 6.0369\n",
      "Per Task Accuracy: {0: 0.9971957936905358, 1: 0.0560885136863648}\n",
      "Validation Loss: 231.5017 - Validation Accuracy: 0.7018\n",
      "---------------\n",
      "Epoch 20/30\n",
      "---------------\n",
      "Epoch 20/30 - Train Loss: 5.9410\n",
      "Per Task Accuracy: {0: 0.9971957936905358, 1: 0.05423381939114863}\n",
      "Validation Loss: 234.9146 - Validation Accuracy: 0.7029\n",
      "---------------\n",
      "Epoch 21/30\n",
      "---------------\n",
      "Epoch 21/30 - Train Loss: 5.2827\n",
      "Per Task Accuracy: {0: 0.9973960941412118, 1: 0.04803018674852903}\n",
      "Validation Loss: 238.3516 - Validation Accuracy: 0.6984\n",
      "---------------\n",
      "Epoch 22/30\n",
      "---------------\n",
      "Epoch 22/30 - Train Loss: 5.5861\n",
      "Per Task Accuracy: {0: 0.9977966950425639, 1: 0.044064978255308265}\n",
      "Validation Loss: 236.3727 - Validation Accuracy: 0.7013\n",
      "---------------\n",
      "Epoch 23/30\n",
      "---------------\n",
      "Epoch 23/30 - Train Loss: 4.8702\n",
      "Per Task Accuracy: {0: 0.9973960941412118, 1: 0.044064978255308265}\n",
      "Validation Loss: 238.5686 - Validation Accuracy: 0.6993\n",
      "---------------\n",
      "Epoch 24/30\n",
      "---------------\n",
      "Epoch 24/30 - Train Loss: 5.2663\n",
      "Per Task Accuracy: {0: 0.9981972959439158, 1: 0.02411102583781018}\n",
      "Validation Loss: 239.0133 - Validation Accuracy: 0.6971\n",
      "---------------\n",
      "Epoch 25/30\n",
      "---------------\n",
      "Epoch 25/30 - Train Loss: 5.7412\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.003837298541826554}\n",
      "Validation Loss: 234.7832 - Validation Accuracy: 0.6985\n",
      "---------------\n",
      "Epoch 26/30\n",
      "---------------\n",
      "Epoch 26/30 - Train Loss: 5.0721\n",
      "Per Task Accuracy: {0: 0.9993990986479719, 1: 0.012023535431056536}\n",
      "Validation Loss: 234.5597 - Validation Accuracy: 0.6980\n",
      "---------------\n",
      "Epoch 27/30\n",
      "---------------\n",
      "Epoch 27/30 - Train Loss: 5.5528\n",
      "Per Task Accuracy: {0: 0.9983975963945919, 1: 0.027948324379636735}\n",
      "Validation Loss: 238.8359 - Validation Accuracy: 0.6960\n",
      "---------------\n",
      "Epoch 28/30\n",
      "---------------\n",
      "Epoch 28/30 - Train Loss: 5.3469\n",
      "Per Task Accuracy: {0: 0.9973960941412118, 1: 0.04803018674852903}\n",
      "Validation Loss: 240.9927 - Validation Accuracy: 0.6916\n",
      "---------------\n",
      "Epoch 29/30\n",
      "---------------\n",
      "Epoch 29/30 - Train Loss: 4.3912\n",
      "Per Task Accuracy: {0: 0.9981972959439158, 1: 0.027756459452545408}\n",
      "Validation Loss: 240.7852 - Validation Accuracy: 0.6953\n",
      "---------------\n",
      "Epoch 30/30\n",
      "---------------\n",
      "Epoch 30/30 - Train Loss: 4.2843\n",
      "Per Task Accuracy: {0: 0.9993990986479719, 1: 0.011895625479662318}\n",
      "Validation Loss: 242.2822 - Validation Accuracy: 0.6973\n",
      "Task 1:\n",
      "Loss on test set: 1.5205\n",
      "Accuracy on test set: 0.6976\n",
      "Task 2:\n",
      "Loss on test set: 0.9285\n",
      "Accuracy on test set: 0.6940\n",
      "Loss on test set: 242.4536\n",
      "Accuracy on test set: 0.6973\n",
      "Incremental ARC Classifier added, current len: 3\n",
      "Incremental Task Layer added, current len: 3\n",
      "tsk: 2\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0.layer', 'incremental_layers.1.layer', 'arc_classifiers.0']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "incremental_layers.1.gate_generator.0.weight\n",
      "incremental_layers.1.gate_generator.0.bias\n",
      "incremental_layers.1.gate_generator.1.weight\n",
      "incremental_layers.1.gate_generator.1.bias\n",
      "incremental_layers.1.gate_generator.5.weight\n",
      "incremental_layers.1.gate_generator.5.bias\n",
      "incremental_layers.2.layer.0.weight\n",
      "incremental_layers.2.layer.0.bias\n",
      "incremental_layers.2.layer.1.weight\n",
      "incremental_layers.2.layer.1.bias\n",
      "incremental_layers.2.layer.3.weight\n",
      "incremental_layers.2.layer.3.bias\n",
      "incremental_layers.2.layer.4.weight\n",
      "incremental_layers.2.layer.4.bias\n",
      "incremental_layers.2.layer.6.weight\n",
      "incremental_layers.2.layer.6.bias\n",
      "incremental_layers.2.layer.7.weight\n",
      "incremental_layers.2.layer.7.bias\n",
      "incremental_layers.2.gate_generator.0.weight\n",
      "incremental_layers.2.gate_generator.0.bias\n",
      "incremental_layers.2.gate_generator.1.weight\n",
      "incremental_layers.2.gate_generator.1.bias\n",
      "incremental_layers.2.gate_generator.5.weight\n",
      "incremental_layers.2.gate_generator.5.bias\n",
      "----Freezing arc_classifiers.0.2.weight\n",
      "----Freezing arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "arc_classifiers.1.2.weight\n",
      "arc_classifiers.1.2.bias\n",
      "arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "arc_classifiers.2.4.weight\n",
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 3.59998704 35.99870405  1.43999793]\n",
      "num_prev_task_classes: 55\n",
      "alpha_cls: [179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      " 179.96760583 179.96760583 179.96760583 179.96760583 179.96760583\n",
      "   7.19994816   7.19994816   7.19994816   7.19994816   7.19994816]\n",
      "alpha_tsk: [ 43.57236687 435.7095518   17.4289844 ]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "9b32e35644c34d79ac347efd24212213",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/870 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 55.7470\n",
      "Per Task Accuracy: {0: 0.9539308963445168, 1: 0.09241493988232284, 2: 0.07810474462017111}\n",
      "Validation Loss: 694.9565 - Validation Accuracy: 0.1597\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 20.8346\n",
      "Per Task Accuracy: {0: 0.985778668002003, 1: 0.14875927347147608, 2: 0.0018148820326678765}\n",
      "Validation Loss: 287.2155 - Validation Accuracy: 0.6277\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 22.4924\n",
      "Per Task Accuracy: {0: 0.9837756634952428, 1: 0.16046303402404707, 2: 0.00388903292714545}\n",
      "Validation Loss: 276.3070 - Validation Accuracy: 0.6338\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 15.2201\n",
      "Per Task Accuracy: {0: 0.9931897846770156, 1: 0.08582757738552059, 2: 0.00388903292714545}\n",
      "Validation Loss: 273.3063 - Validation Accuracy: 0.6420\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 15.7259\n",
      "Per Task Accuracy: {0: 0.9935903855783675, 1: 0.07994371962138655, 2: 0.00388903292714545}\n",
      "Validation Loss: 265.7110 - Validation Accuracy: 0.6477\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 13.8432\n",
      "Per Task Accuracy: {0: 0.9917876815222835, 1: 0.08806600153491942, 2: 0.0}\n",
      "Validation Loss: 268.1195 - Validation Accuracy: 0.6505\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 12.9468\n",
      "Per Task Accuracy: {0: 0.9955933900851277, 1: 0.05973394730110002, 2: 0.0}\n",
      "Validation Loss: 266.9221 - Validation Accuracy: 0.6502\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 11.7813\n",
      "Per Task Accuracy: {0: 0.9969954932398598, 1: 0.04764645689434638, 2: 0.0}\n",
      "Validation Loss: 265.4204 - Validation Accuracy: 0.6538\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n",
      "Epoch 9/30 - Train Loss: 10.1939\n",
      "Per Task Accuracy: {0: 0.9975963945918879, 1: 0.029803018674852903, 2: 0.0}\n",
      "Validation Loss: 268.9568 - Validation Accuracy: 0.6518\n",
      "---------------\n",
      "Epoch 10/30\n",
      "---------------\n",
      "Epoch 10/30 - Train Loss: 10.1962\n",
      "Per Task Accuracy: {0: 0.9965948923385077, 1: 0.053786134561268864, 2: 0.0}\n",
      "Validation Loss: 263.3684 - Validation Accuracy: 0.6548\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 11/30\n",
      "---------------\n",
      "Epoch 11/30 - Train Loss: 10.5575\n",
      "Per Task Accuracy: {0: 0.9907861792689033, 1: 0.09426963417753902, 2: 0.0}\n",
      "Validation Loss: 268.8701 - Validation Accuracy: 0.6530\n",
      "---------------\n",
      "Epoch 12/30\n",
      "---------------\n",
      "Epoch 12/30 - Train Loss: 11.5605\n",
      "Per Task Accuracy: {0: 0.9933900851276916, 1: 0.08199027884369404, 2: 0.0}\n",
      "Validation Loss: 273.0247 - Validation Accuracy: 0.6500\n",
      "---------------\n",
      "Epoch 13/30\n",
      "---------------\n",
      "Epoch 13/30 - Train Loss: 10.7009\n",
      "Per Task Accuracy: {0: 0.9955933900851277, 1: 0.06395497569710923, 2: 0.0}\n",
      "Validation Loss: 268.3039 - Validation Accuracy: 0.6557\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 14/30\n",
      "---------------\n",
      "Epoch 14/30 - Train Loss: 8.2056\n",
      "Per Task Accuracy: {0: 0.9965948923385077, 1: 0.055768738807879255, 2: 0.0}\n",
      "Validation Loss: 270.1594 - Validation Accuracy: 0.6548\n",
      "---------------\n",
      "Epoch 15/30\n",
      "---------------\n",
      "Epoch 15/30 - Train Loss: 8.2633\n",
      "Per Task Accuracy: {0: 0.9987981972959439, 1: 0.02590176515732924, 2: 0.0}\n",
      "Validation Loss: 264.2147 - Validation Accuracy: 0.6602\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 16/30\n",
      "---------------\n",
      "Epoch 16/30 - Train Loss: 7.3960\n",
      "Per Task Accuracy: {0: 0.9993990986479719, 1: 0.017971348170887694, 2: 0.0}\n",
      "Validation Loss: 267.8663 - Validation Accuracy: 0.6582\n",
      "---------------\n",
      "Epoch 17/30\n",
      "---------------\n",
      "Epoch 17/30 - Train Loss: 8.4718\n",
      "Per Task Accuracy: {0: 0.999799699549324, 1: 0.001918649270913277, 2: 0.0}\n",
      "Validation Loss: 264.3859 - Validation Accuracy: 0.6605\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 18/30\n",
      "---------------\n",
      "Epoch 18/30 - Train Loss: 9.1565\n",
      "Per Task Accuracy: {0: 0.999799699549324, 1: 0.001918649270913277, 2: 0.0}\n",
      "Validation Loss: 268.5930 - Validation Accuracy: 0.6602\n",
      "---------------\n",
      "Epoch 19/30\n",
      "---------------\n",
      "Epoch 19/30 - Train Loss: 8.0937\n",
      "Per Task Accuracy: {0: 0.999799699549324, 1: 0.0, 2: 0.0}\n",
      "Validation Loss: 263.2520 - Validation Accuracy: 0.6643\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 20/30\n",
      "---------------\n",
      "Epoch 20/30 - Train Loss: 7.5357\n",
      "Per Task Accuracy: {0: 0.999799699549324, 1: 0.0, 2: 0.0}\n",
      "Validation Loss: 268.1108 - Validation Accuracy: 0.6595\n",
      "---------------\n",
      "Epoch 21/30\n",
      "---------------\n",
      "Epoch 21/30 - Train Loss: 8.4082\n",
      "Per Task Accuracy: {0: 0.999799699549324, 1: 0.0, 2: 0.0}\n",
      "Validation Loss: 272.0303 - Validation Accuracy: 0.6570\n",
      "---------------\n",
      "Epoch 22/30\n",
      "---------------\n",
      "Epoch 22/30 - Train Loss: 8.0161\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.001918649270913277, 2: 0.0}\n",
      "Validation Loss: 267.1030 - Validation Accuracy: 0.6598\n",
      "---------------\n",
      "Epoch 23/30\n",
      "---------------\n",
      "Epoch 23/30 - Train Loss: 6.8850\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.007802507035047326, 2: 0.0}\n",
      "Validation Loss: 269.8240 - Validation Accuracy: 0.6597\n",
      "---------------\n",
      "Epoch 24/30\n",
      "---------------\n",
      "Epoch 24/30 - Train Loss: 7.1805\n",
      "Per Task Accuracy: {0: 0.999799699549324, 1: 0.001918649270913277, 2: 0.0}\n",
      "Validation Loss: 267.4868 - Validation Accuracy: 0.6610\n",
      "---------------\n",
      "Epoch 25/30\n",
      "---------------\n",
      "Epoch 25/30 - Train Loss: 6.0368\n",
      "Per Task Accuracy: {0: 0.999799699549324, 1: 0.001918649270913277, 2: 0.0}\n",
      "Validation Loss: 272.5797 - Validation Accuracy: 0.6562\n",
      "---------------\n",
      "Epoch 26/30\n",
      "---------------\n",
      "Epoch 26/30 - Train Loss: 6.4567\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.007802507035047326, 2: 0.0}\n",
      "Validation Loss: 271.5490 - Validation Accuracy: 0.6578\n",
      "---------------\n",
      "Epoch 27/30\n",
      "---------------\n",
      "Epoch 27/30 - Train Loss: 6.1233\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.007802507035047326, 2: 0.0}\n",
      "Validation Loss: 272.6320 - Validation Accuracy: 0.6567\n",
      "---------------\n",
      "Epoch 28/30\n",
      "---------------\n",
      "Epoch 28/30 - Train Loss: 6.2217\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.007802507035047326, 2: 0.0}\n",
      "Validation Loss: 274.6958 - Validation Accuracy: 0.6575\n",
      "---------------\n",
      "Epoch 29/30\n",
      "---------------\n",
      "Epoch 29/30 - Train Loss: 6.2257\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.001918649270913277, 2: 0.0}\n",
      "Validation Loss: 274.4594 - Validation Accuracy: 0.6562\n",
      "---------------\n",
      "Epoch 30/30\n",
      "---------------\n",
      "Epoch 30/30 - Train Loss: 5.6434\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.001918649270913277, 2: 0.0}\n",
      "Validation Loss: 270.8509 - Validation Accuracy: 0.6590\n",
      "Task 1:\n",
      "Loss on test set: 1.5957\n",
      "Accuracy on test set: 0.6728\n",
      "Task 2:\n",
      "Loss on test set: 2.1209\n",
      "Accuracy on test set: 0.4460\n",
      "Task 3:\n",
      "Loss on test set: 0.7210\n",
      "Accuracy on test set: 0.7340\n",
      "Loss on test set: 271.5571\n",
      "Accuracy on test set: 0.6590\n",
      "Incremental ARC Classifier added, current len: 4\n",
      "Incremental Task Layer added, current len: 4\n",
      "tsk: 3\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0.layer', 'incremental_layers.1.layer', 'incremental_layers.2.layer', 'arc_classifiers.0', 'arc_classifiers.1']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "incremental_layers.1.gate_generator.0.weight\n",
      "incremental_layers.1.gate_generator.0.bias\n",
      "incremental_layers.1.gate_generator.1.weight\n",
      "incremental_layers.1.gate_generator.1.bias\n",
      "incremental_layers.1.gate_generator.5.weight\n",
      "incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "incremental_layers.2.gate_generator.0.weight\n",
      "incremental_layers.2.gate_generator.0.bias\n",
      "incremental_layers.2.gate_generator.1.weight\n",
      "incremental_layers.2.gate_generator.1.bias\n",
      "incremental_layers.2.gate_generator.5.weight\n",
      "incremental_layers.2.gate_generator.5.bias\n",
      "incremental_layers.3.layer.0.weight\n",
      "incremental_layers.3.layer.0.bias\n",
      "incremental_layers.3.layer.1.weight\n",
      "incremental_layers.3.layer.1.bias\n",
      "incremental_layers.3.layer.3.weight\n",
      "incremental_layers.3.layer.3.bias\n",
      "incremental_layers.3.layer.4.weight\n",
      "incremental_layers.3.layer.4.bias\n",
      "incremental_layers.3.layer.6.weight\n",
      "incremental_layers.3.layer.6.bias\n",
      "incremental_layers.3.layer.7.weight\n",
      "incremental_layers.3.layer.7.bias\n",
      "incremental_layers.3.gate_generator.0.weight\n",
      "incremental_layers.3.gate_generator.0.bias\n",
      "incremental_layers.3.gate_generator.1.weight\n",
      "incremental_layers.3.gate_generator.1.bias\n",
      "incremental_layers.3.gate_generator.5.weight\n",
      "incremental_layers.3.gate_generator.5.bias\n",
      "----Freezing arc_classifiers.0.2.weight\n",
      "----Freezing arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "----Freezing arc_classifiers.1.2.weight\n",
      "----Freezing arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "arc_classifiers.2.2.weight\n",
      "arc_classifiers.2.2.bias\n",
      "arc_classifiers.2.4.weight\n",
      "arc_classifiers.3.2.weight\n",
      "arc_classifiers.3.2.bias\n",
      "arc_classifiers.3.4.weight\n",
      "alpha_cls: [184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      "   7.39994524   7.39994524   7.39994524   7.39994524   7.39994524]\n",
      "alpha_tsk: [ 3.69998631 36.99863105 36.99863105  1.47999781]\n",
      "num_prev_task_classes: 60\n",
      "alpha_cls: [184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      " 184.96578133 184.96578133 184.96578133 184.96578133 184.96578133\n",
      "   7.39994524   7.39994524   7.39994524   7.39994524   7.39994524]\n",
      "alpha_tsk: [ 32.02097104 320.19904778 320.19904778  12.80841685]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "120945923bd8437191434c020171de5e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/870 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 64.4344\n",
      "Per Task Accuracy: {0: 0.9873810716074111, 1: 0.003965208493220773, 2: 0.0, 3: 0.13924050632911392}\n",
      "Validation Loss: 613.4150 - Validation Accuracy: 0.3026\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 20.7015\n",
      "Per Task Accuracy: {0: 0.9925888833249875, 1: 0.009913021233051931, 2: 0.0, 3: 0.0750067330999192}\n",
      "Validation Loss: 315.5167 - Validation Accuracy: 0.6115\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 17.0592\n",
      "Per Task Accuracy: {0: 0.9975963945918879, 1: 0.001918649270913277, 2: 0.0, 3: 0.033328844600053864}\n",
      "Validation Loss: 300.8399 - Validation Accuracy: 0.6269\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 15.4564\n",
      "Per Task Accuracy: {0: 0.9979969954932398, 1: 0.01586083397288309, 2: 0.0, 3: 0.014610826824670077}\n",
      "Validation Loss: 295.2967 - Validation Accuracy: 0.6302\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 14.2908\n",
      "Per Task Accuracy: {0: 0.9953930896344517, 1: 0.04393706830391404, 2: 0.0, 3: 0.0021545919741448962}\n",
      "Validation Loss: 299.0689 - Validation Accuracy: 0.6303\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 13.5590\n",
      "Per Task Accuracy: {0: 0.99899849774662, 1: 0.007802507035047326, 2: 0.0, 3: 0.01077295987072448}\n",
      "Validation Loss: 294.8888 - Validation Accuracy: 0.6366\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 12.2948\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.0, 2: 0.0, 3: 0.022286560732561272}\n",
      "Validation Loss: 294.9223 - Validation Accuracy: 0.6363\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 13.5619\n",
      "Per Task Accuracy: {0: 0.9993990986479719, 1: 0.0, 2: 0.0, 3: 0.03959062752491247}\n",
      "Validation Loss: 291.8781 - Validation Accuracy: 0.6355\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n",
      "Epoch 9/30 - Train Loss: 10.9610\n",
      "Per Task Accuracy: {0: 0.9993990986479719, 1: 0.0, 2: 0.0, 3: 0.03501211957985457}\n",
      "Validation Loss: 297.1624 - Validation Accuracy: 0.6365\n",
      "---------------\n",
      "Epoch 10/30\n",
      "---------------\n",
      "Epoch 10/30 - Train Loss: 11.1834\n",
      "Per Task Accuracy: {0: 0.9993990986479719, 1: 0.0, 2: 0.0, 3: 0.05231618637220577}\n",
      "Validation Loss: 293.2023 - Validation Accuracy: 0.6394\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 11/30\n",
      "---------------\n",
      "Epoch 11/30 - Train Loss: 10.9091\n",
      "Per Task Accuracy: {0: 0.9993990986479719, 1: 0.0, 2: 0.0, 3: 0.04942095340694856}\n",
      "Validation Loss: 295.1652 - Validation Accuracy: 0.6394\n",
      "---------------\n",
      "Epoch 12/30\n",
      "---------------\n",
      "Epoch 12/30 - Train Loss: 11.5821\n",
      "Per Task Accuracy: {0: 0.9993990986479719, 1: 0.001918649270913277, 2: 0.0, 3: 0.03979262052248855}\n",
      "Validation Loss: 295.3261 - Validation Accuracy: 0.6400\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 13/30\n",
      "---------------\n",
      "Epoch 13/30 - Train Loss: 9.7906\n",
      "Per Task Accuracy: {0: 0.9993990986479719, 1: 0.001918649270913277, 2: 0.0, 3: 0.025181793697818474}\n",
      "Validation Loss: 294.7488 - Validation Accuracy: 0.6386\n",
      "---------------\n",
      "Epoch 14/30\n",
      "---------------\n",
      "Epoch 14/30 - Train Loss: 9.6889\n",
      "Per Task Accuracy: {0: 0.999198798197296, 1: 0.001918649270913277, 2: 0.0, 3: 0.05440344734715863}\n",
      "Validation Loss: 296.7677 - Validation Accuracy: 0.6403\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 15/30\n",
      "---------------\n",
      "Epoch 15/30 - Train Loss: 9.6068\n",
      "Per Task Accuracy: {0: 0.99899849774662, 1: 0.0, 2: 0.0, 3: 0.06470509022353892}\n",
      "Validation Loss: 297.4126 - Validation Accuracy: 0.6397\n",
      "---------------\n",
      "Epoch 16/30\n",
      "---------------\n",
      "Epoch 16/30 - Train Loss: 9.4047\n",
      "Per Task Accuracy: {0: 0.9981972959439158, 1: 0.0, 2: 0.0, 3: 0.0788446000538648}\n",
      "Validation Loss: 298.1024 - Validation Accuracy: 0.6398\n",
      "---------------\n",
      "Epoch 17/30\n",
      "---------------\n",
      "Epoch 17/30 - Train Loss: 8.9755\n",
      "Per Task Accuracy: {0: 0.9969954932398598, 1: 0.001918649270913277, 2: 0.0, 3: 0.09082951791004579}\n",
      "Validation Loss: 298.5408 - Validation Accuracy: 0.6411\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 18/30\n",
      "---------------\n",
      "Epoch 18/30 - Train Loss: 10.3013\n",
      "Per Task Accuracy: {0: 0.99899849774662, 1: 0.001918649270913277, 2: 0.0, 3: 0.05655803932130353}\n",
      "Validation Loss: 294.2435 - Validation Accuracy: 0.6374\n",
      "---------------\n",
      "Epoch 19/30\n",
      "---------------\n",
      "Epoch 19/30 - Train Loss: 8.9072\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.0, 2: 0.0, 3: 0.0038378669539455967}\n",
      "Validation Loss: 299.7746 - Validation Accuracy: 0.6408\n",
      "---------------\n",
      "Epoch 20/30\n",
      "---------------\n",
      "Epoch 20/30 - Train Loss: 8.6725\n",
      "Per Task Accuracy: {0: 0.9995993990986479, 1: 0.0, 2: 0.0, 3: 0.00814705090223539}\n",
      "Validation Loss: 294.4611 - Validation Accuracy: 0.6426\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 21/30\n",
      "---------------\n",
      "Epoch 21/30 - Train Loss: 8.6043\n",
      "Per Task Accuracy: {0: 0.9975963945918879, 1: 0.0, 2: 0.0, 3: 0.04336116347966604}\n",
      "Validation Loss: 294.5413 - Validation Accuracy: 0.6415\n",
      "---------------\n",
      "Epoch 22/30\n",
      "---------------\n",
      "Epoch 22/30 - Train Loss: 8.8330\n",
      "Per Task Accuracy: {0: 0.9927891837756635, 1: 0.001918649270913277, 2: 0.0, 3: 0.12779423646646917}\n",
      "Validation Loss: 295.3013 - Validation Accuracy: 0.6412\n",
      "---------------\n",
      "Epoch 23/30\n",
      "---------------\n",
      "Epoch 23/30 - Train Loss: 9.3777\n",
      "Per Task Accuracy: {0: 0.9961942914371558, 1: 0.001918649270913277, 2: 0.0, 3: 0.0927821168866146}\n",
      "Validation Loss: 298.2345 - Validation Accuracy: 0.6418\n",
      "---------------\n",
      "Epoch 24/30\n",
      "---------------\n",
      "Epoch 24/30 - Train Loss: 8.5184\n",
      "Per Task Accuracy: {0: 0.9957936905358037, 1: 0.0, 2: 0.0, 3: 0.09682197683813627}\n",
      "Validation Loss: 295.5629 - Validation Accuracy: 0.6395\n",
      "---------------\n",
      "Epoch 25/30\n",
      "---------------\n",
      "Epoch 25/30 - Train Loss: 9.3594\n",
      "Per Task Accuracy: {0: 0.9963945918878317, 1: 0.0, 2: 0.0, 3: 0.09251279288984648}\n",
      "Validation Loss: 293.4326 - Validation Accuracy: 0.6406\n",
      "---------------\n",
      "Epoch 26/30\n",
      "---------------\n",
      "Epoch 26/30 - Train Loss: 7.3048\n",
      "Per Task Accuracy: {0: 0.9961942914371558, 1: 0.0, 2: 0.0, 3: 0.0843657419876111}\n",
      "Validation Loss: 296.4863 - Validation Accuracy: 0.6409\n",
      "---------------\n",
      "Epoch 27/30\n",
      "---------------\n",
      "Epoch 27/30 - Train Loss: 8.1531\n",
      "Per Task Accuracy: {0: 0.9957936905358037, 1: 0.0, 2: 0.0, 3: 0.10113116078642607}\n",
      "Validation Loss: 298.6518 - Validation Accuracy: 0.6414\n",
      "---------------\n",
      "Epoch 28/30\n",
      "---------------\n",
      "Epoch 28/30 - Train Loss: 8.8304\n",
      "Per Task Accuracy: {0: 0.9959939909864797, 1: 0.0, 2: 0.0, 3: 0.09897656881228117}\n",
      "Validation Loss: 303.0165 - Validation Accuracy: 0.6398\n",
      "---------------\n",
      "Epoch 29/30\n",
      "---------------\n",
      "Epoch 29/30 - Train Loss: 7.3412\n",
      "Per Task Accuracy: {0: 0.9945918878317476, 1: 0.0, 2: 0.0, 3: 0.09897656881228117}\n",
      "Validation Loss: 295.1693 - Validation Accuracy: 0.6442\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 30/30\n",
      "---------------\n",
      "Epoch 30/30 - Train Loss: 7.9268\n",
      "Per Task Accuracy: {0: 0.9965948923385077, 1: 0.0, 2: 0.0, 3: 0.09466738486399139}\n",
      "Validation Loss: 299.7315 - Validation Accuracy: 0.6409\n",
      "Task 1:\n",
      "Loss on test set: 1.7132\n",
      "Accuracy on test set: 0.6620\n",
      "Task 2:\n",
      "Loss on test set: 2.2546\n",
      "Accuracy on test set: 0.4380\n",
      "Task 3:\n",
      "Loss on test set: 1.7378\n",
      "Accuracy on test set: 0.5100\n",
      "Task 4:\n",
      "Loss on test set: 0.7612\n",
      "Accuracy on test set: 0.7640\n",
      "Loss on test set: 301.0010\n",
      "Accuracy on test set: 0.6409\n",
      "Incremental ARC Classifier added, current len: 5\n",
      "Incremental Task Layer added, current len: 5\n",
      "tsk: 4\n",
      "freeze_prefix: ['feature_extractor', 'incremental_layers.0.layer', 'incremental_layers.1.layer', 'incremental_layers.2.layer', 'incremental_layers.3.layer', 'arc_classifiers.0', 'arc_classifiers.1', 'arc_classifiers.2']\n",
      "----Freezing feature_extractor.0.weight\n",
      "----Freezing feature_extractor.1.weight\n",
      "----Freezing feature_extractor.1.bias\n",
      "----Freezing feature_extractor.4.0.conv1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.weight\n",
      "----Freezing feature_extractor.4.0.bn1.bias\n",
      "----Freezing feature_extractor.4.0.conv2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.weight\n",
      "----Freezing feature_extractor.4.0.bn2.bias\n",
      "----Freezing feature_extractor.4.1.conv1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.weight\n",
      "----Freezing feature_extractor.4.1.bn1.bias\n",
      "----Freezing feature_extractor.4.1.conv2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.weight\n",
      "----Freezing feature_extractor.4.1.bn2.bias\n",
      "----Freezing feature_extractor.4.2.conv1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.weight\n",
      "----Freezing feature_extractor.4.2.bn1.bias\n",
      "----Freezing feature_extractor.4.2.conv2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.weight\n",
      "----Freezing feature_extractor.4.2.bn2.bias\n",
      "----Freezing feature_extractor.5.0.conv1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.weight\n",
      "----Freezing feature_extractor.5.0.bn1.bias\n",
      "----Freezing feature_extractor.5.0.conv2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.weight\n",
      "----Freezing feature_extractor.5.0.bn2.bias\n",
      "----Freezing feature_extractor.5.0.downsample.0.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.weight\n",
      "----Freezing feature_extractor.5.0.downsample.1.bias\n",
      "----Freezing feature_extractor.5.1.conv1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.weight\n",
      "----Freezing feature_extractor.5.1.bn1.bias\n",
      "----Freezing feature_extractor.5.1.conv2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.weight\n",
      "----Freezing feature_extractor.5.1.bn2.bias\n",
      "----Freezing feature_extractor.5.2.conv1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.weight\n",
      "----Freezing feature_extractor.5.2.bn1.bias\n",
      "----Freezing feature_extractor.5.2.conv2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.weight\n",
      "----Freezing feature_extractor.5.2.bn2.bias\n",
      "----Freezing feature_extractor.5.3.conv1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.weight\n",
      "----Freezing feature_extractor.5.3.bn1.bias\n",
      "----Freezing feature_extractor.5.3.conv2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.weight\n",
      "----Freezing feature_extractor.5.3.bn2.bias\n",
      "----Freezing feature_extractor.6.0.conv1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.weight\n",
      "----Freezing feature_extractor.6.0.bn1.bias\n",
      "----Freezing feature_extractor.6.0.conv2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.weight\n",
      "----Freezing feature_extractor.6.0.bn2.bias\n",
      "----Freezing feature_extractor.6.0.downsample.0.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.weight\n",
      "----Freezing feature_extractor.6.0.downsample.1.bias\n",
      "----Freezing feature_extractor.6.1.conv1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.weight\n",
      "----Freezing feature_extractor.6.1.bn1.bias\n",
      "----Freezing feature_extractor.6.1.conv2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.weight\n",
      "----Freezing feature_extractor.6.1.bn2.bias\n",
      "----Freezing feature_extractor.6.2.conv1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.weight\n",
      "----Freezing feature_extractor.6.2.bn1.bias\n",
      "----Freezing feature_extractor.6.2.conv2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.weight\n",
      "----Freezing feature_extractor.6.2.bn2.bias\n",
      "----Freezing feature_extractor.6.3.conv1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.weight\n",
      "----Freezing feature_extractor.6.3.bn1.bias\n",
      "----Freezing feature_extractor.6.3.conv2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.weight\n",
      "----Freezing feature_extractor.6.3.bn2.bias\n",
      "----Freezing feature_extractor.6.4.conv1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.weight\n",
      "----Freezing feature_extractor.6.4.bn1.bias\n",
      "----Freezing feature_extractor.6.4.conv2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.weight\n",
      "----Freezing feature_extractor.6.4.bn2.bias\n",
      "----Freezing feature_extractor.6.5.conv1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.weight\n",
      "----Freezing feature_extractor.6.5.bn1.bias\n",
      "----Freezing feature_extractor.6.5.conv2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.weight\n",
      "----Freezing feature_extractor.6.5.bn2.bias\n",
      "----Freezing feature_extractor.7.0.conv1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.weight\n",
      "----Freezing feature_extractor.7.0.bn1.bias\n",
      "----Freezing feature_extractor.7.0.conv2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.weight\n",
      "----Freezing feature_extractor.7.0.bn2.bias\n",
      "----Freezing feature_extractor.7.0.downsample.0.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.weight\n",
      "----Freezing feature_extractor.7.0.downsample.1.bias\n",
      "----Freezing feature_extractor.7.1.conv1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.weight\n",
      "----Freezing feature_extractor.7.1.bn1.bias\n",
      "----Freezing feature_extractor.7.1.conv2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.weight\n",
      "----Freezing feature_extractor.7.1.bn2.bias\n",
      "----Freezing feature_extractor.7.2.conv1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.weight\n",
      "----Freezing feature_extractor.7.2.bn1.bias\n",
      "----Freezing feature_extractor.7.2.conv2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.weight\n",
      "----Freezing feature_extractor.7.2.bn2.bias\n",
      "----Freezing incremental_layers.1.layer.0.weight\n",
      "----Freezing incremental_layers.1.layer.0.bias\n",
      "----Freezing incremental_layers.1.layer.1.weight\n",
      "----Freezing incremental_layers.1.layer.1.bias\n",
      "----Freezing incremental_layers.1.layer.3.weight\n",
      "----Freezing incremental_layers.1.layer.3.bias\n",
      "----Freezing incremental_layers.1.layer.4.weight\n",
      "----Freezing incremental_layers.1.layer.4.bias\n",
      "----Freezing incremental_layers.1.layer.6.weight\n",
      "----Freezing incremental_layers.1.layer.6.bias\n",
      "----Freezing incremental_layers.1.layer.7.weight\n",
      "----Freezing incremental_layers.1.layer.7.bias\n",
      "incremental_layers.1.gate_generator.0.weight\n",
      "incremental_layers.1.gate_generator.0.bias\n",
      "incremental_layers.1.gate_generator.1.weight\n",
      "incremental_layers.1.gate_generator.1.bias\n",
      "incremental_layers.1.gate_generator.5.weight\n",
      "incremental_layers.1.gate_generator.5.bias\n",
      "----Freezing incremental_layers.2.layer.0.weight\n",
      "----Freezing incremental_layers.2.layer.0.bias\n",
      "----Freezing incremental_layers.2.layer.1.weight\n",
      "----Freezing incremental_layers.2.layer.1.bias\n",
      "----Freezing incremental_layers.2.layer.3.weight\n",
      "----Freezing incremental_layers.2.layer.3.bias\n",
      "----Freezing incremental_layers.2.layer.4.weight\n",
      "----Freezing incremental_layers.2.layer.4.bias\n",
      "----Freezing incremental_layers.2.layer.6.weight\n",
      "----Freezing incremental_layers.2.layer.6.bias\n",
      "----Freezing incremental_layers.2.layer.7.weight\n",
      "----Freezing incremental_layers.2.layer.7.bias\n",
      "incremental_layers.2.gate_generator.0.weight\n",
      "incremental_layers.2.gate_generator.0.bias\n",
      "incremental_layers.2.gate_generator.1.weight\n",
      "incremental_layers.2.gate_generator.1.bias\n",
      "incremental_layers.2.gate_generator.5.weight\n",
      "incremental_layers.2.gate_generator.5.bias\n",
      "----Freezing incremental_layers.3.layer.0.weight\n",
      "----Freezing incremental_layers.3.layer.0.bias\n",
      "----Freezing incremental_layers.3.layer.1.weight\n",
      "----Freezing incremental_layers.3.layer.1.bias\n",
      "----Freezing incremental_layers.3.layer.3.weight\n",
      "----Freezing incremental_layers.3.layer.3.bias\n",
      "----Freezing incremental_layers.3.layer.4.weight\n",
      "----Freezing incremental_layers.3.layer.4.bias\n",
      "----Freezing incremental_layers.3.layer.6.weight\n",
      "----Freezing incremental_layers.3.layer.6.bias\n",
      "----Freezing incremental_layers.3.layer.7.weight\n",
      "----Freezing incremental_layers.3.layer.7.bias\n",
      "incremental_layers.3.gate_generator.0.weight\n",
      "incremental_layers.3.gate_generator.0.bias\n",
      "incremental_layers.3.gate_generator.1.weight\n",
      "incremental_layers.3.gate_generator.1.bias\n",
      "incremental_layers.3.gate_generator.5.weight\n",
      "incremental_layers.3.gate_generator.5.bias\n",
      "incremental_layers.4.layer.0.weight\n",
      "incremental_layers.4.layer.0.bias\n",
      "incremental_layers.4.layer.1.weight\n",
      "incremental_layers.4.layer.1.bias\n",
      "incremental_layers.4.layer.3.weight\n",
      "incremental_layers.4.layer.3.bias\n",
      "incremental_layers.4.layer.4.weight\n",
      "incremental_layers.4.layer.4.bias\n",
      "incremental_layers.4.layer.6.weight\n",
      "incremental_layers.4.layer.6.bias\n",
      "incremental_layers.4.layer.7.weight\n",
      "incremental_layers.4.layer.7.bias\n",
      "incremental_layers.4.gate_generator.0.weight\n",
      "incremental_layers.4.gate_generator.0.bias\n",
      "incremental_layers.4.gate_generator.1.weight\n",
      "incremental_layers.4.gate_generator.1.bias\n",
      "incremental_layers.4.gate_generator.5.weight\n",
      "incremental_layers.4.gate_generator.5.bias\n",
      "----Freezing arc_classifiers.0.2.weight\n",
      "----Freezing arc_classifiers.0.2.bias\n",
      "----Freezing arc_classifiers.0.4.weight\n",
      "----Freezing arc_classifiers.1.2.weight\n",
      "----Freezing arc_classifiers.1.2.bias\n",
      "----Freezing arc_classifiers.1.4.weight\n",
      "----Freezing arc_classifiers.2.2.weight\n",
      "----Freezing arc_classifiers.2.2.bias\n",
      "----Freezing arc_classifiers.2.4.weight\n",
      "arc_classifiers.3.2.weight\n",
      "arc_classifiers.3.2.bias\n",
      "arc_classifiers.3.4.weight\n",
      "arc_classifiers.4.2.weight\n",
      "arc_classifiers.4.2.bias\n",
      "arc_classifiers.4.4.weight\n",
      "alpha_cls: [189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      "   7.59994224   7.59994224   7.59994224   7.59994224   7.59994224]\n",
      "alpha_tsk: [ 3.79998556 37.99855605 37.99855605 37.99855605  1.51999769]\n",
      "num_prev_task_classes: 65\n",
      "alpha_cls: [189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      " 189.96390686 189.96390686 189.96390686 189.96390686 189.96390686\n",
      "   7.59994224   7.59994224   7.59994224   7.59994224   7.59994224]\n",
      "alpha_tsk: [ 28.17574613 281.74782555 281.74782555 281.74782555  11.27032415]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ae6a1c78a64e4ab29e57b85cdcd9dee7",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Total Training Progress:   0%|          | 0/900 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------\n",
      "Epoch 1/30\n",
      "---------------\n",
      "Epoch 1/30 - Train Loss: 65.7014\n",
      "Per Task Accuracy: {0: 0.9969954932398598, 1: 0.0, 2: 0.0, 3: 0.0038378669539455967, 4: 0.02662968099861304}\n",
      "Validation Loss: 472.4516 - Validation Accuracy: 0.4244\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 2/30\n",
      "---------------\n",
      "Epoch 2/30 - Train Loss: 23.5288\n",
      "Per Task Accuracy: {0: 0.9967951927891838, 1: 0.0, 2: 0.0, 3: 0.0043091839482897925, 4: 0.00665742024965326}\n",
      "Validation Loss: 339.1445 - Validation Accuracy: 0.5911\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 3/30\n",
      "---------------\n",
      "Epoch 3/30 - Train Loss: 20.9701\n",
      "Per Task Accuracy: {0: 0.9821607411116675, 1: 0.0, 2: 0.0, 3: 0.018448693778615674, 4: 0.008876560332871012}\n",
      "Validation Loss: 341.2060 - Validation Accuracy: 0.5909\n",
      "---------------\n",
      "Epoch 4/30\n",
      "---------------\n",
      "Epoch 4/30 - Train Loss: 19.8903\n",
      "Per Task Accuracy: {0: 0.9937906860290435, 1: 0.0, 2: 0.0, 3: 0.011984917856180985, 4: 0.02815533980582524}\n",
      "Validation Loss: 329.5384 - Validation Accuracy: 0.6023\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 5/30\n",
      "---------------\n",
      "Epoch 5/30 - Train Loss: 15.5174\n",
      "Per Task Accuracy: {0: 0.9971957936905358, 1: 0.0, 2: 0.0, 3: 0.05824131430110423, 4: 0.0}\n",
      "Validation Loss: 319.6795 - Validation Accuracy: 0.6111\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 6/30\n",
      "---------------\n",
      "Epoch 6/30 - Train Loss: 15.4741\n",
      "Per Task Accuracy: {0: 0.9977966950425639, 1: 0.0, 2: 0.0, 3: 0.05824131430110423, 4: 0.0}\n",
      "Validation Loss: 319.4663 - Validation Accuracy: 0.6124\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 7/30\n",
      "---------------\n",
      "Epoch 7/30 - Train Loss: 14.5644\n",
      "Per Task Accuracy: {0: 0.9979969954932398, 1: 0.0, 2: 0.0, 3: 0.05009426339886884, 4: 0.0}\n",
      "Validation Loss: 320.2416 - Validation Accuracy: 0.6141\n",
      "Accuracy increased, updating the best accuracy...\n",
      "---------------\n",
      "Epoch 8/30\n",
      "---------------\n",
      "Epoch 8/30 - Train Loss: 13.9154\n",
      "Per Task Accuracy: {0: 0.9977966950425639, 1: 0.0, 2: 0.0, 3: 0.05608672232695933, 4: 0.0}\n",
      "Validation Loss: 321.5215 - Validation Accuracy: 0.6130\n",
      "---------------\n",
      "Epoch 9/30\n",
      "---------------\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[8], line 96\u001b[0m\n\u001b[1;32m     94\u001b[0m criterion_label\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m     95\u001b[0m model\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m---> 96\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[43mtrain_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43mold_model\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m     97\u001b[0m \u001b[43m            \u001b[49m\u001b[43mtask_class_orders\u001b[49m\u001b[43m,\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mcombined_train_loader\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mval\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mcombined_test_loader\u001b[49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m     98\u001b[0m \u001b[43m            \u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43mweight_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43mscheduler\u001b[49m\u001b[43m,\u001b[49m\u001b[43mlabel_mapping\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlabel_mapping\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m     99\u001b[0m \u001b[43m            \u001b[49m\u001b[43mnum_epochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_epochs\u001b[49m\u001b[43m,\u001b[49m\u001b[43muse_weighted_loss\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m    101\u001b[0m 
\u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(tsk\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m1\u001b[39m):\n\u001b[1;32m    102\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTask \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mi\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m:\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/train_evalv1.py:201\u001b[0m, in \u001b[0;36mtrain_model\u001b[0;34m(device, model, old_model, task_class_orders, dataloaders, criterion, criterion_label, weight_label, optimizer, scheduler, label_mapping, num_epochs, stop_accuracy, use_weighted_loss, weighted_loss_momentum)\u001b[0m\n\u001b[1;32m    198\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEpoch \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mepoch\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_epochs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m    199\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m-\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m15\u001b[39m)\n\u001b[0;32m--> 201\u001b[0m train_loss, _, _, _ \u001b[38;5;241m=\u001b[39m \u001b[43mtrain_one_epoch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43mold_model\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdataloaders\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43mweight_label\u001b[49m\u001b[43m,\u001b[49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpbar\u001b[49m\u001b[43m,\u001b[49m\u001b[43mlabel_mapping_tensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43mtask_label_mapping_tensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mclass_weights\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    202\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEpoch \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mepoch\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_epochs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m - Train Loss: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtrain_loss\u001b[38;5;132;01m:\u001b[39;00m\u001b[38;5;124m.4f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m    203\u001b[0m val_loss, val_accuracy, _,_ \u001b[38;5;241m=\u001b[39m validate_one_epoch(model, dataloaders[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mval\u001b[39m\u001b[38;5;124m'\u001b[39m], criterion,label_mapping_tensor, task_label_mapping_tensor,device)\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/train_evalv1.py:29\u001b[0m, in \u001b[0;36mtrain_one_epoch\u001b[0;34m(model, old_model, dataloader, criterion, criterion_label, weight_label, optimizer, device, pbar, label_mapping_tensor, task_label_mapping_tensor, class_weights)\u001b[0m\n\u001b[1;32m     26\u001b[0m inputs, rwa_labels \u001b[38;5;241m=\u001b[39m inputs\u001b[38;5;241m.\u001b[39mto(device), rwa_labels\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[1;32m     27\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mzero_grad()\n\u001b[0;32m---> 29\u001b[0m _, outputs,tsk_outputs,all_gate_weights \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     30\u001b[0m \u001b[38;5;66;03m#print(f\"all_gate_weights shape: {all_gate_weights.shape}\")\u001b[39;00m\n\u001b[1;32m     31\u001b[0m _, _,_,old_all_gate_weights \u001b[38;5;241m=\u001b[39m old_model(inputs)\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/networkv3.py:161\u001b[0m, in \u001b[0;36mArcMultiTaskModel.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m    157\u001b[0m gate_weights \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m    160\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m idx \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mincremental_layers)):\n\u001b[0;32m--> 161\u001b[0m     features,gate_weight \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mincremental_layers\u001b[49m\u001b[43m[\u001b[49m\u001b[43midx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfeatures\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    162\u001b[0m     gate_weights\u001b[38;5;241m.\u001b[39mappend(gate_weight\u001b[38;5;241m.\u001b[39msqueeze())\n\u001b[1;32m    166\u001b[0m \u001b[38;5;66;03m# 计算task_predictions------------------------------------------------------\u001b[39;00m\n\u001b[1;32m    167\u001b[0m \u001b[38;5;66;03m# 初始化task_predictions列表\u001b[39;00m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/workspace/cascade_incremental/cascade_libary/networkv3.py:81\u001b[0m, in \u001b[0;36mIncrementalTaskLayer.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m     78\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, x):\n\u001b[1;32m     79\u001b[0m     \u001b[38;5;66;03m#print('x shape:', x.shape)\u001b[39;00m\n\u001b[1;32m     80\u001b[0m     \u001b[38;5;66;03m# 通过门控层\u001b[39;00m\n\u001b[0;32m---> 81\u001b[0m     gated_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlayer\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     83\u001b[0m     \u001b[38;5;66;03m# 通过门控权重生成网络，生成门控权重\u001b[39;00m\n\u001b[1;32m     84\u001b[0m     \u001b[38;5;66;03m#print('torch.cat([x, gated_output]:', torch.cat([x, gated_output], dim=1).shape)\u001b[39;00m\n\u001b[1;32m     85\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m'''#NT2\u001b[39;00m\n\u001b[1;32m     86\u001b[0m \u001b[38;5;124;03m    gate_weight = self.gate_generator(torch.cat([x, gated_output], dim=1)).unsqueeze(-1).unsqueeze(-1)'''\u001b[39;00m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/container.py:250\u001b[0m, in \u001b[0;36mSequential.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m    248\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m):\n\u001b[1;32m    249\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m module \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m:\n\u001b[0;32m--> 250\u001b[0m         \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m    251\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28minput\u001b[39m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1736\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1734\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1735\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1736\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/module.py:1747\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1742\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1743\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1744\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1745\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1746\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1747\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1749\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1750\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/batchnorm.py:160\u001b[0m, in \u001b[0;36m_BatchNorm.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m    159\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 160\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_check_input_dim\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m    162\u001b[0m     \u001b[38;5;66;03m# exponential_average_factor is set to self.momentum\u001b[39;00m\n\u001b[1;32m    163\u001b[0m     \u001b[38;5;66;03m# (when it is available) only so that it gets updated\u001b[39;00m\n\u001b[1;32m    164\u001b[0m     \u001b[38;5;66;03m# in ONNX graph when this node is exported to ONNX.\u001b[39;00m\n\u001b[1;32m    165\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmomentum \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
      "File \u001b[0;32m~/anaconda3/envs/dl/lib/python3.12/site-packages/torch/nn/modules/batchnorm.py:451\u001b[0m, in \u001b[0;36mBatchNorm2d._check_input_dim\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m    450\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_check_input_dim\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m):\n\u001b[0;32m--> 451\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28;43minput\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdim\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m4\u001b[39m:\n\u001b[1;32m    452\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mexpected 4D input (got \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28minput\u001b[39m\u001b[38;5;241m.\u001b[39mdim()\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124mD input)\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# NOTE: this module-level list is shadowed by the per-task list rebuilt\n",
    "# inside the loop below; kept for reference only.\n",
    "init_freeze_prefixes = [\n",
    "    'feature_extractor',\n",
    "    'avgpool',\n",
    "    #'fc',\n",
    "    ]\n",
    "for tsk in range(1, 10):\n",
    "    # Snapshot the current model as the frozen \"old\" reference before growing it.\n",
    "    old_model = deepcopy(model).to(device)\n",
    "    # Grow the model: one new ArcFace head and one new incremental layer per task.\n",
    "    model.add_arc_task_classifier(feature_dim=512, num_classes_per_task=num_classes_per_task[tsk], s=s, m=m)\n",
    "    model.add_incremental_task_layer()\n",
    "\n",
    "    # ----- decide which parameters to freeze for this task -----------------\n",
    "    init_freeze_prefixes = [\n",
    "        'feature_extractor',\n",
    "        #'avgpool',\n",
    "        #'fc',\n",
    "        ]\n",
    "    print('tsk:', tsk)\n",
    "    # Copy so the appends below never mutate the template list.\n",
    "    freeze_prefix = list(init_freeze_prefixes)\n",
    "\n",
    "    if tsk == 1:\n",
    "        # First increment: freeze the whole previous incremental layer (gate included).\n",
    "        freeze_prefix.append('incremental_layers.' + str(tsk - 1))\n",
    "    else:\n",
    "        # Later increments: freeze only the '.layer' sub-module of every\n",
    "        # previous incremental layer; range(tsk) covers layers 0 .. tsk-1\n",
    "        # (the original appended layer tsk-1 separately — same set, one pass).\n",
    "        for layer in range(tsk):\n",
    "            freeze_prefix.append('incremental_layers.' + str(layer) + '.layer')\n",
    "\n",
    "    if tsk > 1:\n",
    "        # Freeze the ArcFace classifiers of all previous tasks.\n",
    "        for layer in range(tsk - 1):\n",
    "            freeze_prefix.append('arc_classifiers.' + str(layer))\n",
    "    # -----------------------------------------------------------------------\n",
    "    print('freeze_prefix:', freeze_prefix)\n",
    "    freeze_layers(model, freeze_prefix)\n",
    "\n",
    "    # Training set: exemplars of all previous tasks + full data of the current task.\n",
    "    combined_train_set = []\n",
    "    for i in range(tsk):\n",
    "        combined_train_set.append(exampler_train_sets[i])\n",
    "    combined_train_set.append(origin_train_set[tsk])\n",
    "    combined_train_set = ConcatDataset(combined_train_set)\n",
    "    combined_train_loader = DataLoader(MyDataset(combined_train_set, transform=transform_train),\n",
    "                                       batch_size=batch_size, shuffle=True, num_workers=4)\n",
    "\n",
    "    # Test set: the full test split of every task seen so far.\n",
    "    combined_test_set = []\n",
    "    for i in range(tsk + 1):\n",
    "        combined_test_set.append(origin_test_set[i])\n",
    "    combined_test_set = ConcatDataset(combined_test_set)\n",
    "    combined_test_loader = DataLoader(MyDataset(combined_test_set, transform=transform_test),\n",
    "                                      batch_size=batch_size, shuffle=False, num_workers=4)\n",
    "\n",
    "    # ----- optimizer / scheduler / loss ------------------------------------\n",
    "    num_epochs = 30\n",
    "    weight_decay = 0.01\n",
    "    max_lr = 0.01\n",
    "    #weight_label = 1.5 + ((tsk - 1) * 0.16)\n",
    "    weight_label = 1.5\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=max_lr, weight_decay=weight_decay)\n",
    "    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr, epochs=num_epochs,\n",
    "                                                    steps_per_epoch=len(combined_train_loader), three_phase=False)\n",
    "    class_stats = get_class_stats(combined_train_loader)\n",
    "    # Class-balanced focal-loss alphas, sorted/padded to the global label order.\n",
    "    # (The original called this twice with identical arguments; once suffices.)\n",
    "    alpha_cls, alpha_tsk = sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders)\n",
    "    print(\"alpha_cls:\", alpha_cls)\n",
    "    print(\"alpha_tsk:\", alpha_tsk)\n",
    "    alpha_cls, alpha_tsk = reweight_alphas(task_class_orders=task_class_orders, tsk=tsk,\n",
    "                    alpha_cls=alpha_cls, alpha_tsk=alpha_tsk,\n",
    "                    w_cls=1, w_tsk=1)\n",
    "    # Rescale the task alphas so their mean matches the class alphas' mean.\n",
    "    alpha_tsk = alpha_cls.mean() / alpha_tsk.mean() * alpha_tsk\n",
    "    print(\"alpha_cls:\", alpha_cls)\n",
    "    print(\"alpha_tsk:\", alpha_tsk)\n",
    "    criterion = MultiClassFocalLossWithAlpha(device, alpha_cls, gamma=0, reduction='mean')\n",
    "    # Task-level criterion disabled for this run; train_model receives None.\n",
    "    criterion_label = None\n",
    "    model.to(device)\n",
    "    model = train_model(device, model, old_model,\n",
    "                task_class_orders, {'train': combined_train_loader, 'val': combined_test_loader},\n",
    "                criterion, criterion_label, weight_label, optimizer, scheduler, label_mapping=label_mapping,\n",
    "                num_epochs=num_epochs, use_weighted_loss=False)\n",
    "\n",
    "    # Per-task evaluation on each task's own test loader.\n",
    "    for i in range(tsk + 1):\n",
    "        print(f'Task {i+1}:')\n",
    "        loss, accuracy = evaluate_model(device, label_mapping, model, {'val': test_loaders[i]})\n",
    "        print(f'Loss on test set: {loss:.4f}')\n",
    "        print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "    # Evaluation on the combined test set of all tasks seen so far.\n",
    "    loss, accuracy = evaluate_model(device, label_mapping, model, {'val': combined_test_loader}, criterion)\n",
    "    print(f'Loss on test set: {loss:.4f}')\n",
    "    print(f'Accuracy on test set: {accuracy:.4f}')\n",
    "\n",
    "    # Save a checkpoint named by task id, accuracy and timestamp.\n",
    "    accuracy_str = f\"{accuracy:.3f}\"\n",
    "    current_time = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "    file_name = f\"Loop_branch_{str(tsk)}_{accuracy_str}_{current_time}.pth\"\n",
    "    task_save_pth = os.path.join(model_save_pth, str(tsk))\n",
    "    # exist_ok=True makes a separate os.path.exists check unnecessary.\n",
    "    os.makedirs(task_save_pth, exist_ok=True)\n",
    "    torch.save(model.state_dict(), os.path.join(task_save_pth, file_name))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Task 1:\n",
      "Loss on test set: 1.9638\n",
      "Accuracy on test set: 0.6066\n",
      "Task 2:\n",
      "Loss on test set: 2.6106\n",
      "Accuracy on test set: 0.3800\n",
      "Task 3:\n",
      "Loss on test set: 2.6340\n",
      "Accuracy on test set: 0.4160\n",
      "Task 4:\n",
      "Loss on test set: 2.3775\n",
      "Accuracy on test set: 0.4120\n",
      "Task 5:\n",
      "Loss on test set: 2.9290\n",
      "Accuracy on test set: 0.3400\n",
      "Task 6:\n",
      "Loss on test set: 2.2082\n",
      "Accuracy on test set: 0.4640\n",
      "Task 7:\n",
      "Loss on test set: 3.2342\n",
      "Accuracy on test set: 0.2580\n",
      "Task 8:\n",
      "Loss on test set: 2.9716\n",
      "Accuracy on test set: 0.3100\n",
      "Task 9:\n",
      "Loss on test set: 2.7363\n",
      "Accuracy on test set: 0.3620\n",
      "Task 10:\n",
      "Loss on test set: 1.0429\n",
      "Accuracy on test set: 0.6740\n"
     ]
    }
   ],
   "source": [
    "# Report loss/accuracy of the final model on every task's test split.\n",
    "for tsk in range(10):\n",
    "    print(f'Task {tsk+1}:')\n",
    "    task_loader = {'val': test_loaders[tsk]}\n",
    "    loss, accuracy = evaluate_model(device, label_mapping, model, task_loader)\n",
    "    print(f'Loss on test set: {loss:.4f}')\n",
    "    print(f'Accuracy on test set: {accuracy:.4f}')\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "dl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
