{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'models'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-1-6663821c9f1d>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mDataLoader\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mmodels\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mModel_Wrapper\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     12\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     13\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mdatasets\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mget_trainval_datasets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'models'"
     ]
    }
   ],
   "source": [
    "# Imports: stdlib, then third-party, then project-local modules.\n",
    "# (Removed duplicate `from matplotlib import pyplot as plt`.)\n",
    "import glob\n",
    "import json\n",
    "import os\n",
    "import sys\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from PIL import Image\n",
    "from sklearn.manifold import TSNE\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision import transforms\n",
    "from tqdm import tqdm\n",
    "\n",
    "from models import Model_Wrapper\n",
    "from datasets import get_trainval_datasets\n",
    "from utils import CenterLoss, AverageMeter, TopKAccuracyMetric, ModelCheckpoint, batch_augment, ConsistencyEvaluation, KMeans\n",
    "\n",
    "# Make the sibling \"common\" package importable before importing from it.\n",
    "sys.path.append('..')\n",
    "import common.cub_attributions as attribution_method\n",
    "\n",
    "import config_distributed as config\n",
    "\n",
    "ToPILImage = transforms.ToPILImage()\n",
    "torch.set_printoptions(profile=\"full\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The autoreload extension is already loaded. To reload it, use:\n",
      "  %reload_ext autoreload\n"
     ]
    }
   ],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "### for reproducibility\n",
    "seed = 199\n",
    "torch.manual_seed(seed)\n",
    "np.random.seed(seed)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### read class-wise ground-truth attributes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Class-level ground-truth attribute strengths: one row of floats per class.\n",
    "DATAPATH = '/storage/rong/CUB_200_2011'\n",
    "attr_path = os.path.join(DATAPATH, 'CUB_200_2011/attributes/class_attribute_labels_continuous.txt')\n",
    "attr_rows = []\n",
    "with open(attr_path) as f:\n",
    "    for line in f:\n",
    "        attr_rows.append(torch.tensor([float(att) for att in line.strip().split(' ')]))\n",
    "attribute_cls = torch.stack(attr_rows)\n",
    "#attribute_cls = F.normalize(attribute_cls, dim=1, p=2)\n",
    "\n",
    "# Map attribute id (string) -> attribute name.\n",
    "attribute_names = {}\n",
    "with open(os.path.join(DATAPATH, 'attributes.txt')) as f:\n",
    "    for line in f:\n",
    "        attr_id, attr_name = line.strip().split(' ')[:2]\n",
    "        attribute_names[attr_id] = attr_name"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [],
   "source": [
    "### store results\n",
    "# One result list (60 values) is appended per attribution method and seed.\n",
    "ig, sg, grad = [], [], []\n",
    "rand, pca, ica = [], [], []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load model and dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GPU settings\n",
    "assert torch.cuda.is_available()\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = config.GPU\n",
    "device = torch.device(\"cuda:0\")\n",
    "torch.backends.cudnn.benchmark = True\n",
    "\n",
    "# Only the validation split is used by the concept analysis below.\n",
    "train_dataset, validate_dataset = get_trainval_datasets(config.tag, (224, 224))\n",
    "\n",
    "validate_loader = DataLoader(\n",
    "    validate_dataset,\n",
    "    batch_size=32,\n",
    "    shuffle=False,\n",
    "    num_workers=config.workers,\n",
    "    pin_memory=True,\n",
    ")\n",
    "\n",
    "num_classes = validate_dataset.num_classes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "metadata": {},
   "outputs": [],
   "source": [
    "def concept_score(H, vector_dict):\n",
    "    \"\"\"Score every sample against the concept directions in H.\n",
    "\n",
    "    H: (n_concepts, d) tensor of concept directions.\n",
    "    vector_dict: {image_id: (d,) feature tensor}.\n",
    "    Returns (id_list, score_list): image ids and one (n_concepts,) score tensor each.\n",
    "    \"\"\"\n",
    "    # L2-normalise the directions so projections are comparable across concepts.\n",
    "    H_n = F.normalize(H, p=2.0, dim=1)\n",
    "\n",
    "    ### project vectors onto the discovered directions\n",
    "    id_list = []\n",
    "    score_list = []\n",
    "    for k, v in vector_dict.items():\n",
    "        id_list.append(k)\n",
    "        score_list.append(torch.matmul(H_n, v))\n",
    "\n",
    "    return id_list, score_list\n",
    "\n",
    "def concept_score_reduction(vector_dict, reduction='pca', seed=None):\n",
    "    \"\"\"Use PCA/ICA components of the feature vectors as concept scores.\n",
    "\n",
    "    reduction: 'pca' or 'ica'; seed is only used by FastICA.\n",
    "    Returns (id_list, score_list) in the same format as concept_score().\n",
    "    \"\"\"\n",
    "    if reduction == 'pca':\n",
    "        from sklearn.decomposition import PCA\n",
    "        reducer = PCA(n_components=30)\n",
    "    elif reduction == 'ica':\n",
    "        from sklearn.decomposition import FastICA\n",
    "        reducer = FastICA(n_components=30, random_state=seed)\n",
    "    else:\n",
    "        # Fail loudly instead of printing and then crashing on an undefined reducer.\n",
    "        raise ValueError(f\"Unknown reduction '{reduction}'; expected 'pca' or 'ica'\")\n",
    "\n",
    "    id_list = []\n",
    "    vector_list = []\n",
    "    for k, v in vector_dict.items():\n",
    "        id_list.append(k)\n",
    "        vector_list.append(v)\n",
    "    vector_matrix = torch.stack(vector_list).numpy()\n",
    "    vector_matrix = torch.tensor(reducer.fit_transform(vector_matrix))\n",
    "    score_list = list(vector_matrix)\n",
    "\n",
    "    return id_list, score_list\n",
    "\n",
    "\n",
    "def concept_score_cls(score_list, id_list, validate_dataset=validate_dataset):\n",
    "    \"\"\"Average the 30-dim concept scores per class (200 CUB classes).\"\"\"\n",
    "    score_cls = torch.zeros((200, 30))\n",
    "    num_sample_cls = torch.zeros((200, 1))\n",
    "    for s_list, id in zip(score_list, id_list):\n",
    "        # validate_dataset[id][1] is the class label of sample `id`.\n",
    "        score_cls[validate_dataset[id][1], :] += s_list\n",
    "        num_sample_cls[validate_dataset[id][1], :] += 1\n",
    "    score_cls /= num_sample_cls\n",
    "    return score_cls\n",
    "\n",
    "def concept_score_num_cls(score_list, id_list, validate_dataset=validate_dataset):\n",
    "    \"\"\"Class-level scores with each concept split into a positive and a negative\n",
    "    direction, giving 60 = 2 * 30 columns.\"\"\"\n",
    "    score_cls = torch.zeros((200, 60))\n",
    "    num_sample_cls = torch.zeros((200, 1))\n",
    "    for s_list, id in zip(score_list, id_list):\n",
    "        score_pos = torch.relu(s_list)\n",
    "        score_neg = torch.relu(s_list * (-1))\n",
    "        s_list_bidir = torch.cat([score_pos, score_neg])\n",
    "        score_cls[validate_dataset[id][1], :] += s_list_bidir\n",
    "        num_sample_cls[validate_dataset[id][1], :] += 1\n",
    "    score_cls /= num_sample_cls\n",
    "    return score_cls\n",
    "\n",
    "def topK_corr(score_cls, attribute_cls, topN=5, n_C=60):\n",
    "    \"\"\"Top-N absolute Pearson correlations between concepts and attributes.\n",
    "\n",
    "    Fixed: the topN argument is no longer shadowed by a hard-coded `topN = 5`.\n",
    "    \"\"\"\n",
    "    corr = np.corrcoef(torch.cat((score_cls, attribute_cls), dim=1).t())\n",
    "    corr_c2attr = abs(corr[:n_C, n_C:])\n",
    "    corr_v = np.zeros((n_C, topN))\n",
    "    corr_ind = np.zeros((n_C, topN))\n",
    "    ind_sort = np.argsort(corr_c2attr, axis=1)\n",
    "    ind_sort = ind_sort[:, ::-1]  # descending: column 0 is the best match\n",
    "\n",
    "    for c in range(n_C):\n",
    "        corr_v[c, :] = corr_c2attr[c, ind_sort[c, :][:topN]]\n",
    "        corr_ind[c, :] = ind_sort[c, :][:topN]\n",
    "    return corr_v, corr_ind\n",
    "\n",
    "def compute_corr(ranks):\n",
    "    \"\"\" Compute spearman rank correlation of the rank matrix.\n",
    "        ranks = (M, R) matrix (M-concepts, R-Ranks in range [1...R])\n",
    "        return (M,M) matrix with rank correlations between concepts.\n",
    "    \"\"\"\n",
    "    num_input = ranks.shape[0]\n",
    "    resmat = np.zeros((num_input, num_input))\n",
    "    for i in range(num_input):\n",
    "        for j in range(num_input):\n",
    "            cov = np.mean(ranks[i]*ranks[j], axis=0) - np.mean(ranks[i], axis=0)*np.mean(ranks[j], axis=0)  # E[XY] - E[X]E[Y]\n",
    "            corr = cov / (np.std(ranks[i], axis=0)*np.std(ranks[j], axis=0))\n",
    "            resmat[i, j] = np.mean(corr)\n",
    "    return resmat\n",
    "\n",
    "def rank_corr(allres):\n",
    "    \"\"\" Return the rank correlation of concepts. allres = (n_concepts, n_samples)\n",
    "    \"\"\"\n",
    "    order = allres.argsort(axis=1)\n",
    "    ranks = order.argsort(axis=1)\n",
    "    return compute_corr(ranks)\n",
    "\n",
    "def topK_rank_corr(score_cls, attribute_cls, topN=5, n_C=60):\n",
    "    \"\"\"Top-N absolute Spearman rank correlations between concepts and attributes.\n",
    "\n",
    "    Fixed: the topN argument is no longer shadowed by a hard-coded `topN = 5`.\n",
    "    \"\"\"\n",
    "    from scipy import stats\n",
    "    corr, pval = stats.spearmanr(torch.cat((score_cls, attribute_cls), dim=1))\n",
    "    corr_c2attr = abs(corr[:n_C, n_C:])\n",
    "    corr_v = np.zeros((n_C, topN))\n",
    "    corr_ind = np.zeros((n_C, topN))\n",
    "    ind_sort = np.argsort(corr_c2attr, axis=1)\n",
    "    ind_sort = ind_sort[:, ::-1]  # descending: column 0 is the best match\n",
    "\n",
    "    for c in range(n_C):\n",
    "        corr_v[c, :] = corr_c2attr[c, ind_sort[c, :][:topN]]\n",
    "        corr_ind[c, :] = ind_sort[c, :][:topN]\n",
    "    return corr_v, corr_ind"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## OA analysis"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:06<00:00, 27.13it/s]\n"
     ]
    }
   ],
   "source": [
    "start_epoch = 0  # kept for parity with the training script; unused below\n",
    "net = Model_Wrapper(num_classes=num_classes, M=config.num_attentions, net=config.net, pretrained=True, pth_path=config.pretrain_path)\n",
    "\n",
    "### trained model dir (seed 0)\n",
    "model_dir = './checkpoints/resnet50_224'\n",
    "\n",
    "# Load the trained weights.\n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "net.load_state_dict(checkpoint['state_dict'])\n",
    "\n",
    "net.to(device)\n",
    "net.eval()\n",
    "\n",
    "# One feature vector per validation image, keyed by image index.\n",
    "vector_dict = {}\n",
    "with torch.no_grad():  # inference only: skip autograd bookkeeping\n",
    "    for (x, _, img_index, _) in tqdm(validate_loader):\n",
    "        x = x.to(device)\n",
    "        _, _, out = net(x)\n",
    "        out = out.cpu()\n",
    "        for k, v in zip(img_index, out):\n",
    "            vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Yao: replace the following cell with the directions found by other concept models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "224_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "224_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "224_model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "224_model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 4 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho_oa/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'resnet50_224/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = resolution + \"_\" + r.split('/')[-1]\n",
    "    print(name)\n",
    "    i = 0\n",
    "    # Load every eval_results{i}.json run; a context manager closes the file\n",
    "    # handle promptly (the previous json.load(open(...)) leaked it).\n",
    "    while os.path.isfile(r + f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name + \"_r\" + str(i))\n",
    "        with open(r + f\"/eval_results{i}.json\") as fp:\n",
    "            logs.append((name + \"_r\" + str(i), json.load(fp)))\n",
    "        i += 1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Yao:\n",
    "- \"concept_score()\" is the function for score calculation. Please replace this with score calculation from other methods.\n",
    "\n",
    "- \"id\" is used to find out which class the image belongs to."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho_oa/checkpoint-checkpoints/resnet50_224/model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6335959603939085 0.61337164108671 0.6005391194704309\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/resnet50_224/model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.7721233172781807 0.6677805706530525 0.6153252844083752\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/resnet50_224/model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6958795382141835 0.6219998596202692 0.5945798497048873\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    # Concept directions learned with this attribution method.\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    # corr_v[:, 0] equals corr_v.max(axis=1) because columns are sorted in\n",
    "    # descending order; use .max(axis=1) for consistency with the PCA/ICA cells.\n",
    "    result_list = [corr_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Yao: You probably only need cells until here\n",
    "\n",
    "---------------------------\n",
    "----------------------------"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.7429218420203433, 0.7402664719307781, 0.7093517894773727, 0.6830565029697954, 0.6577320675641152, 0.6375110265671289, 0.6221540261114047, 0.6098934826536111, 0.5988948523009813, 0.5875547422099597, 0.5729722100262313, 0.559966949335359, 0.5488833498135584, 0.5381326361590446, 0.5284921658923135, 0.5199615203887511, 0.5110681906916974, 0.5029285819938726, 0.49558903520142406, 0.48890437948852483, 0.4825292348247495, 0.47659318975186493, 0.4706062671007878, 0.46495691740703776, 0.4597353258719747, 0.4547604927364366, 0.449497129786282, 0.4445536531362504, 0.43990246335772093, 0.43555326424810376, 0.43145532775901346, 0.42744044223372835, 0.42366772746012965, 0.42006971534916715, 0.41662420597397176, 0.4129846563165732, 0.40948739206442925, 0.406123129600053, 0.4028693265430699, 0.39976571602037436, 0.3967661854405774, 0.3938628394103357, 0.3910832452242328, 0.3884186658384204, 0.385762392492022, 0.38318894983100593, 0.38055963063886256, 0.3778114995684494, 0.3751624803187172, 0.37259574125744827, 0.37012671198946945, 0.36761469965445487, 0.3650904471024268, 0.3626559441545793, 0.3602935586414821, 0.35797090210697785, 0.35570877811081425, 0.35349671001821603, 0.35133187015073336, 0.34892229043389467]\n"
     ]
    }
   ],
   "source": [
    "# PCA baseline: principal components of the features act as concept directions.\n",
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[-N:]].mean() for N in range(1, 61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.47712420623962637, 0.47685223522569187, 0.47329319840776246, 0.4688934429863825, 0.46365014857442527, 0.4594705844341969, 0.45479069081431067, 0.4511987699598048, 0.4478463995859069, 0.4449800834122308, 0.44181865744081866, 0.43704706162525303, 0.43282431569920476, 0.4287239635884039, 0.4248513166340707, 0.4210955733599572, 0.41732162805240486, 0.41395853741292926, 0.41089274863393915, 0.40807742889018594, 0.4054988517267244, 0.4030841724214843, 0.4007229513164664, 0.3981363816178673, 0.39567382682918656, 0.39324206256520305, 0.3908252647223217, 0.38855181097923014, 0.38642601947991595, 0.38443010981104686, 0.3823358390418787, 0.3803152159732939, 0.3784118319783091, 0.37661886098520625, 0.37469544710562586, 0.3728520394701209, 0.3708904414478104, 0.3688114529713022, 0.3667775176451601, 0.36477864087334766, 0.3628178540443262, 0.3609104258707195, 0.3590368406795995, 0.3572245133895806, 0.35547646508608627, 0.35374969441411575, 0.3519800174145525, 0.35019875293248753, 0.34840670077431446, 0.34668390180035713, 0.34499772281918284, 0.3432557232222343, 0.3414296293376581, 0.3396563161559659, 0.33786668329807257, 0.3361394712243965, 0.3344662838532092, 0.33275416090312854, 0.33101264516089735, 0.32930538057837083]\n"
     ]
    }
   ],
   "source": [
    "# ICA baseline. random_seed_list holds 10 seeds, but only one run is executed here.\n",
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99, size=10)\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[-N:]].mean() for N in range(1, 61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 93,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[0.7356870258464355, 0.7208403760292109, 0.7059456774421325, 0.6932686428680467, 0.6839799599663801, 0.677663968120599, 0.672845192704843, 0.665866473379878, 0.6593697846260026, 0.6510336474596408, 0.6434895139280506, 0.6362668864961608, 0.6301253720639498, 0.6248180073158569, 0.6200186349900676, 0.6154444476240404, 0.6111954312269423, 0.6062995950563121, 0.601755730000673, 0.5973673731987275, 0.5933199256341444, 0.5896226173248738, 0.5861862012900877, 0.5825698439669614, 0.5790565935352174, 0.5757085532070172, 0.5724587960009165, 0.5693764480945722, 0.5664492784429229, 0.5636113093603972, 0.560875710927358, 0.5582838472269103, 0.5557505138888249, 0.5533077500659279, 0.551003332940007, 0.5488166392973483, 0.5465705751302716, 0.5441521261008475, 0.5418429325698586, 0.5395982089224359, 0.5373377200615573, 0.5351720691108024, 0.5330108739768493, 0.5308879072030667, 0.528843698128741, 0.5268649246354086, 0.524802113236011, 0.5228188058771553, 0.5207510357875429, 0.5186917248276613, 0.5166790142746169, 0.5146530304609935, 0.5126447144500967, 0.51058846616377, 0.508500485952349, 0.5064866329362878, 0.5042276825576301, 0.5019674009512591, 0.4994960033954482, 0.49668601180470556]\n",
      "[0.73568703 0.72084038 0.70594568 0.69326864 0.68397996 0.67766397\n",
      " 0.67284519 0.66586647 0.65936978 0.65103365 0.64348951 0.63626689\n",
      " 0.63012537 0.62481801 0.62001863 0.61544445 0.61119543 0.6062996\n",
      " 0.60175573 0.59736737 0.59331993 0.58962262 0.5861862  0.58256984\n",
      " 0.57905659 0.57570855 0.5724588  0.56937645 0.56644928 0.56361131\n",
      " 0.56087571 0.55828385 0.55575051 0.55330775 0.55100333 0.54881664\n",
      " 0.54657058 0.54415213 0.54184293 0.53959821 0.53733772 0.53517207\n",
      " 0.53301087 0.53088791 0.5288437  0.52686492 0.52480211 0.52281881\n",
      " 0.52075104 0.51869172 0.51667901 0.51465303 0.51264471 0.51058847\n",
      " 0.50850049 0.50648663 0.50422768 0.5019674  0.499496   0.49668601] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## compute a random baseline\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    # NOTE(review): `H` leaks in from the preceding analysis cell. Draw the\n",
    "    # random directions into a fresh tensor: the old `H.uniform_(-1, 1)`\n",
    "    # mutated H in place and clobbered the loaded concept directions.\n",
    "    H_rand = torch.empty_like(H).uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:06<00:00, 27.33it/s]\n"
     ]
    }
   ],
   "source": [
    "start_epoch = 0  # kept for parity with the training script; unused below\n",
    "net = Model_Wrapper(num_classes=num_classes, M=config.num_attentions, net=config.net, pretrained=True, pth_path=config.pretrain_path)\n",
    "\n",
    "### trained model dir (seed 1)\n",
    "model_dir = './checkpoints/resnet50_224/seed1'\n",
    "\n",
    "# Load the trained weights.\n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "net.load_state_dict(checkpoint['state_dict'])\n",
    "\n",
    "net.to(device)\n",
    "net.eval()\n",
    "\n",
    "# One feature vector per validation image, keyed by image index.\n",
    "vector_dict = {}\n",
    "with torch.no_grad():  # inference only: skip autograd bookkeeping\n",
    "    for (x, _, img_index, _) in tqdm(validate_loader):\n",
    "        x = x.to(device)\n",
    "        _, _, out = net(x)\n",
    "        out = out.cpu()\n",
    "        for k, v in zip(img_index, out):\n",
    "            vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed1_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "seed1_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed1_model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed1_model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 8 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho_oa/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'seed1/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = resolution + \"_\" + r.split('/')[-1]\n",
    "    print(name)\n",
    "    i = 0\n",
    "    # Load every eval_results{i}.json run; a context manager closes the file\n",
    "    # handle promptly (the previous json.load(open(...)) leaked it).\n",
    "    while os.path.isfile(r + f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name + \"_r\" + str(i))\n",
    "        with open(r + f\"/eval_results{i}.json\") as fp:\n",
    "            logs.append((name + \"_r\" + str(i), json.load(fp)))\n",
    "        i += 1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho_oa/checkpoint-checkpoints/seed1/model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6817578662796454 0.6582228849749447 0.6148400295930666\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/seed1/model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.644544468775978 0.614043219035237 0.5866718896128986\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/seed1/model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6637531656233313 0.64876154878709 0.6191363190691319\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    # Concept directions learned with this attribution method.\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    # corr_v[:, 0] equals corr_v.max(axis=1) because columns are sorted in\n",
    "    # descending order; use .max(axis=1) for consistency with the PCA/ICA cells.\n",
    "    result_list = [corr_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.7893771815904295, 0.7888188086309602, 0.7595429757128512, 0.7378706726472032, 0.7085431095966646, 0.6872195812116256, 0.6595097341778817, 0.6384887904178647, 0.6217259482005457, 0.603736683095617, 0.5885942701375941, 0.5746582514261578, 0.5619619129766334, 0.5492080996663053, 0.5380852508466902, 0.5275291423436117, 0.51775774590116, 0.5089307880692437, 0.5007490168938502, 0.4933251574291909, 0.4861262744673804, 0.4793870461396468, 0.4731618838135866, 0.4671391570212912, 0.461113155700162, 0.4553955498223243, 0.45005908008028533, 0.44491352437066173, 0.4400853176753915, 0.43551764525375547, 0.431234100530705, 0.4271959423117855, 0.42319895378018657, 0.419420264824245, 0.4157615145810193, 0.4121455255680682, 0.40848692403942827, 0.4049860402924366, 0.40159336203954776, 0.3981022387390682, 0.3944596319492967, 0.3909042046463865, 0.38741372451851924, 0.3840638047035087, 0.38080445827231113, 0.3776013834016864, 0.37441174507693126, 0.37133478790227126, 0.368378576747854, 0.3655282139340376, 0.36271071171115127, 0.35994215285688175, 0.3572650206123293, 0.35455749173750817, 0.3519361067885048, 0.3493457533288828, 0.34673966891201735, 0.34413720935434644, 0.34159998156593646, 0.33850128050732736]\n"
     ]
    }
   ],
   "source": [
    "# PCA baseline: principal components of the features act as concept directions.\n",
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[-N:]].mean() for N in range(1, 61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.5349680081486605, 0.5025348386657845, 0.4890919107925704, 0.4794994949330907, 0.47273968906258246, 0.4673904632231156, 0.46323409555000084, 0.4587849431189083, 0.4548138749937729, 0.451283314784105, 0.44733695884671004, 0.4439350329870928, 0.4404764817727509, 0.4373423571355373, 0.43455895330263666, 0.43188542753026904, 0.42947638235881314, 0.4273154865850713, 0.4251454890432999, 0.42310456657068646, 0.4208962356054077, 0.4186068189668541, 0.4164660418040768, 0.41437180147002883, 0.41241338573658465, 0.41031318547422246, 0.408316757963307, 0.40623045801257135, 0.4041551061014376, 0.40217285547028064, 0.4001877624328625, 0.3982888795308377, 0.39624555511321435, 0.3943060385068996, 0.3923058173853014, 0.3904011982926561, 0.3885426071759974, 0.38649179509227594, 0.38452992612913506, 0.3826311571390953, 0.38082076945500276, 0.37908696852608736, 0.3772767402657319, 0.37547873370599644, 0.373702840991559, 0.371996905953713, 0.37033634700274765, 0.368741832960165, 0.3671715331825653, 0.3655360857896571, 0.36386090458412695, 0.36207845913394404, 0.3603378253819805, 0.3586545758689778, 0.3569852930925213, 0.35532214385028194, 0.3536706023350681, 0.3518128483316073, 0.34980734180081924, 0.3477319264601011]\n"
     ]
    }
   ],
   "source": [
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "# print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[0.6603427376127886, 0.6487433097582389, 0.6387457621243943, 0.631790218611103, 0.6259347274118308, 0.6214875482654166, 0.6182474253290479, 0.6157316190212643, 0.6126685958653351, 0.6091954242916705, 0.6055865397055841, 0.6022737869773649, 0.5994617322348587, 0.597010296922832, 0.5944989340600255, 0.5916241756109066, 0.5890781979217392, 0.586740484345033, 0.5845812465558952, 0.5822920011015245, 0.5801688189212528, 0.578113721894256, 0.5762263358552842, 0.5742701371613886, 0.5723897531942935, 0.570596667120997, 0.5688763019421504, 0.5669015179039315, 0.564979284830045, 0.5631446275845744, 0.5613456163284208, 0.5596381750929149, 0.5578858303716011, 0.5559480077288725, 0.5541197670944957, 0.5522951528167793, 0.5505062622565312, 0.5487713541194578, 0.5471125228812624, 0.5455233228214749, 0.5439924951904312, 0.5423598596389118, 0.5407626145357944, 0.5392109763970485, 0.5375567604454677, 0.5358369006704645, 0.5339744739648056, 0.5321746954453337, 0.5303887057751466, 0.5286616453828059, 0.5267606159996169, 0.5247573633747613, 0.5227604992247158, 0.5207209298635084, 0.5185691904803887, 0.5164339449546722, 0.5143402998569342, 0.5120755509011384, 0.5092803869486177, 0.50654008174851]\n",
      "[0.66034274 0.64874331 0.63874576 0.63179022 0.62593473 0.62148755\n",
      " 0.61824743 0.61573162 0.6126686  0.60919542 0.60558654 0.60227379\n",
      " 0.59946173 0.5970103  0.59449893 0.59162418 0.5890782  0.58674048\n",
      " 0.58458125 0.582292   0.58016882 0.57811372 0.57622634 0.57427014\n",
      " 0.57238975 0.57059667 0.5688763  0.56690152 0.56497928 0.56314463\n",
      " 0.56134562 0.55963818 0.55788583 0.55594801 0.55411977 0.55229515\n",
      " 0.55050626 0.54877135 0.54711252 0.54552332 0.5439925  0.54235986\n",
      " 0.54076261 0.53921098 0.53755676 0.5358369  0.53397447 0.5321747\n",
      " 0.53038871 0.52866165 0.52676062 0.52475736 0.5227605  0.52072093\n",
      " 0.51856919 0.51643394 0.5143403  0.51207555 0.50928039 0.50654008] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## compute a random baseline\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    H_rand = H.uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:06<00:00, 27.27it/s]\n"
     ]
    }
   ],
   "source": [
    "### trained model dir\n",
    "model_dir = './checkpoints/resnet50_224/seed2'\n",
    "\n",
    "# Load the model \n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "state_dict = checkpoint['state_dict']\n",
    "net.load_state_dict(state_dict)\n",
    "\n",
    "net.to(device)\n",
    "net.eval()\n",
    "\n",
    "vector_dict = {}\n",
    "for (x, _, img_index, _) in tqdm((validate_loader)):\n",
    "    x = x.to(device)\n",
    "    _, _, out = net(x)\n",
    "    out = out.detach().cpu()\n",
    "    for k,v in zip(img_index, out):\n",
    "        vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed2_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "seed2_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed2_model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed2_model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 4 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho_oa/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'seed2/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = r.split('/')[-1]\n",
    "    name = resolution + \"_\" + name\n",
    "    print(name)\n",
    "    i = 0\n",
    "    while os.path.isfile(r +f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name+\"_r\"+str(i))\n",
    "        logs.append((name+\"_r\" +str(i), json.load(open(r +f\"/eval_results{i}.json\"))))\n",
    "        i+=1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho_oa/checkpoint-checkpoints/seed2/model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6252536894567601 0.612207642456545 0.5990714878167313\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/seed2/model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.7312899328930562 0.6640135404176049 0.6079218660159812\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/seed2/model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6257859489829062 0.611683257632485 0.5906357933602308\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    result_list = [corr_v[best_match_idx[(-1)*N:],0].mean() for N in range(1,61)]\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.8018548931837765, 0.7979329040517569, 0.7471602487010784, 0.7200157399011142, 0.7017747144581945, 0.6884212762076397, 0.6622248230558032, 0.6413185240060969, 0.6248893697936491, 0.6065993973343746, 0.590866899784611, 0.5759836011186185, 0.5631239824007541, 0.5515332712866432, 0.5405983758068714, 0.5309590717753953, 0.5220239700804441, 0.5134056700798484, 0.5055385825268374, 0.4975051422486011, 0.4896635090843729, 0.48244080198354394, 0.475818398485967, 0.46906970926337416, 0.46285643267260396, 0.45668950172797224, 0.4509589124688617, 0.44556540908231795, 0.44049806581761125, 0.4357482270707008, 0.43129028193902946, 0.42708993024399944, 0.4231182418284475, 0.41919171121574567, 0.4154163685577583, 0.4118216165316917, 0.4083395194864361, 0.4050036690412534, 0.4015962276795902, 0.3983453163907787, 0.395167830567825, 0.3921122555177656, 0.38903720025155225, 0.3860332693931456, 0.38312916882032116, 0.38033603184436016, 0.3775634188997374, 0.37482951565771244, 0.3721822673379908, 0.3696393681596678, 0.3670298658087589, 0.3645111483941291, 0.3620554027343558, 0.35955635673385955, 0.3570969507484407, 0.3547245845684875, 0.35238893770205393, 0.35013257172055406, 0.3479286303019741, 0.34556607122870336]\n"
     ]
    }
   ],
   "source": [
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.4758636578794053, 0.4686727312149974, 0.4648085681950356, 0.4626652101788706, 0.4610998165267361, 0.4552201443830439, 0.4510068102255908, 0.4475937873483097, 0.4446898311715042, 0.4414887857853843, 0.4376970447139279, 0.433281089956254, 0.42935267213575207, 0.42573353935534836, 0.42247849803675525, 0.4195125305020036, 0.416332068010164, 0.41340958079665846, 0.41043694988754903, 0.4077149861650328, 0.40493249872354425, 0.40238755258930925, 0.39995050778399077, 0.39758417165152715, 0.3951386764237768, 0.3924666794965053, 0.38996179130659114, 0.3875873527580838, 0.3853450659557418, 0.38317075902229814, 0.3810438957110748, 0.37888878716379987, 0.37685012656378714, 0.37487959099661505, 0.37279794603876915, 0.37072901571945427, 0.36872247345952625, 0.36676944642288467, 0.36488369431182266, 0.36303717406555736, 0.36127207947149054, 0.35954746912605295, 0.3578904175148971, 0.3562681268623473, 0.354661002040769, 0.353084684713817, 0.3515445148199728, 0.35000413070057884, 0.34851887452083785, 0.3470359159736695, 0.3456109407512907, 0.3441657118806786, 0.34262957699808516, 0.34111941256426, 0.33960123933903746, 0.3381015920036408, 0.33652152631433885, 0.33486195287305354, 0.33315301519678814, 0.3309535062494717]\n"
     ]
    }
   ],
   "source": [
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "# print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[0.6698368565690288, 0.6673690550755906, 0.6567244339045023, 0.6480015408028368, 0.6411944727063725, 0.6305883568420901, 0.6204382189660657, 0.6126301215078591, 0.6063948778645565, 0.601400142379922, 0.5968577608343443, 0.5927377314492601, 0.5887513985018983, 0.5852916476378132, 0.582110369597616, 0.5793180122719469, 0.5767344278599232, 0.5744010959832875, 0.5720650230760821, 0.5699436252172168, 0.5679590250818933, 0.5661080088448084, 0.5641458260435261, 0.56213368833219, 0.5601740046767661, 0.5580458159409241, 0.555924076263167, 0.5539042267164727, 0.5519563218367646, 0.5500786765330294, 0.5479406142554964, 0.5459227933088725, 0.5439287226167938, 0.5419364478892049, 0.539689267139865, 0.5375500081333787, 0.5354800345056023, 0.5334555152570674, 0.5315153900513693, 0.5296421649091709, 0.527778848781405, 0.5259854013682856, 0.5242506911586986, 0.5225605367733577, 0.5208954299457171, 0.5191157951441139, 0.5174076966110551, 0.5157027576788343, 0.514063097194124, 0.5124376274230324, 0.5108218100677042, 0.5091598596015336, 0.5074539123519413, 0.5057623415394897, 0.5041300804166686, 0.5025190991896746, 0.5008782496994888, 0.49909207937859956, 0.4972305458021913, 0.495151901414183]\n",
      "[0.66983686 0.66736906 0.65672443 0.64800154 0.64119447 0.63058836\n",
      " 0.62043822 0.61263012 0.60639488 0.60140014 0.59685776 0.59273773\n",
      " 0.5887514  0.58529165 0.58211037 0.57931801 0.57673443 0.5744011\n",
      " 0.57206502 0.56994363 0.56795903 0.56610801 0.56414583 0.56213369\n",
      " 0.560174   0.55804582 0.55592408 0.55390423 0.55195632 0.55007868\n",
      " 0.54794061 0.54592279 0.54392872 0.54193645 0.53968927 0.53755001\n",
      " 0.53548003 0.53345552 0.53151539 0.52964216 0.52777885 0.5259854\n",
      " 0.52425069 0.52256054 0.52089543 0.5191158  0.5174077  0.51570276\n",
      " 0.5140631  0.51243763 0.51082181 0.50915986 0.50745391 0.50576234\n",
      " 0.50413008 0.5025191  0.50087825 0.49909208 0.49723055 0.4951519 ] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## compute a random baseline\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    H_rand = H.uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:06<00:00, 27.02it/s]\n"
     ]
    }
   ],
   "source": [
    "### trained model dir\n",
    "model_dir = './checkpoints/resnet50_224/seed3'\n",
    "\n",
    "# Load the model \n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "state_dict = checkpoint['state_dict']\n",
    "net.load_state_dict(state_dict)\n",
    "\n",
    "net.to(device)\n",
    "net.eval()\n",
    "\n",
    "vector_dict = {}\n",
    "for (x, _, img_index, _) in tqdm((validate_loader)):\n",
    "    x = x.to(device)\n",
    "    _, _, out = net(x)\n",
    "    out = out.detach().cpu()\n",
    "    for k,v in zip(img_index, out):\n",
    "        vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed3_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "seed3_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed3_model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed3_model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 4 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho_oa/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'seed3/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = r.split('/')[-1]\n",
    "    name = resolution + \"_\" + name\n",
    "    print(name)\n",
    "    i = 0\n",
    "    while os.path.isfile(r +f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name+\"_r\"+str(i))\n",
    "        logs.append((name+\"_r\" +str(i), json.load(open(r +f\"/eval_results{i}.json\"))))\n",
    "        i+=1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho_oa/checkpoint-checkpoints/seed3/model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.688713830311551 0.651081695909628 0.6033823937327246\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/seed3/model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6950040281528984 0.6570602924959431 0.6279170079354898\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/seed3/model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6250480162297024 0.6086053638171689 0.597648511292052\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    result_list = [corr_v[best_match_idx[(-1)*N:],0].mean() for N in range(1,61)]\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.8046417220778795, 0.8022089266307891, 0.7330315103156891, 0.6970629456658866, 0.6737505669613808, 0.6581300275411481, 0.6443459170391154, 0.6338015660912106, 0.6190454523478379, 0.6019064338432609, 0.5876733235492447, 0.5749555273528372, 0.5638305394077835, 0.5531444954628532, 0.5434193539650567, 0.534329951625554, 0.5262858685562624, 0.5176202615559268, 0.5096660648224058, 0.502481053969147, 0.4959674901503507, 0.4897229938644918, 0.4837631150369475, 0.47813230128685213, 0.47273559113925934, 0.46751791217394284, 0.46254283490429204, 0.4578575942779481, 0.4533270468130107, 0.448870001383104, 0.44434579621771003, 0.44005374993211815, 0.4358333088766129, 0.43153814004511265, 0.42746135059756174, 0.42353275788458133, 0.4197215181055041, 0.41607814191617026, 0.41260081119262876, 0.4091520980355165, 0.4058706036225901, 0.40236611988841053, 0.39898628651252027, 0.39565386669759567, 0.3924153887748569, 0.3892369828445952, 0.3861500233517254, 0.3830612565268637, 0.3800615187950036, 0.377168473809021, 0.3743844523572906, 0.37164298969860055, 0.36899535059096233, 0.36643655987715507, 0.36390278885308347, 0.36144227516575217, 0.35880738523794986, 0.356253193030055, 0.35371029141733074, 0.351186504445077]\n"
     ]
    }
   ],
   "source": [
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.5040984883914553, 0.4776915853000081, 0.46296082625531154, 0.45487819117276507, 0.449605020507773, 0.44422564921208, 0.4403508421224358, 0.4368730994095793, 0.4341538918113149, 0.4312443274605604, 0.4286852982238592, 0.42655013855705476, 0.42450513153098857, 0.4226041225397526, 0.4207692945782554, 0.4190613238061759, 0.41682938423720045, 0.4139263332011273, 0.41132768702549083, 0.40889300059347644, 0.40663140900272293, 0.4045234932190145, 0.4023722325986303, 0.400354344169443, 0.3983828376139125, 0.3965510901561023, 0.3947118007569603, 0.3929386302916151, 0.391169038215574, 0.38949899468678, 0.3878309875235809, 0.3857865820213014, 0.3838165894854054, 0.3819294558620518, 0.3801263138015648, 0.3783458271076942, 0.37653968052886083, 0.3747972914507096, 0.37309663085833394, 0.37130483857880103, 0.36955877542889404, 0.36785204103947894, 0.36618495204224577, 0.3645682100119506, 0.3630014291586159, 0.36140695408730134, 0.35983869392877477, 0.35824079013979554, 0.35669563026646456, 0.3550991046768051, 0.3535511808172124, 0.35190517922809, 0.3501559742477587, 0.3484055706553035, 0.3467033891611603, 0.34498003701222213, 0.34329506347415806, 0.3415355480051705, 0.3397078560311694, 0.3375457049829179]\n"
     ]
    }
   ],
   "source": [
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "# print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 111,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[0.6827676398364029, 0.6661615761892962, 0.6561151864773259, 0.6474913012747519, 0.6403098645790098, 0.6351307413386659, 0.630628118844422, 0.6271005175570318, 0.6232295986523773, 0.6198792258803134, 0.6167205215896273, 0.6139702554614258, 0.6100236632804523, 0.6065938396855136, 0.6033875559825185, 0.600561502304146, 0.5978961579613324, 0.5953386247429973, 0.5927900223300093, 0.5904791887627465, 0.5873661415279356, 0.5844089774890197, 0.5816516653129357, 0.5790815864473846, 0.5767169021512798, 0.5741928679597094, 0.5718156998063681, 0.5695751825025012, 0.5674728566681957, 0.5653820203916061, 0.5632660396548218, 0.561177546157206, 0.5592035111866465, 0.5573392913348918, 0.5554591290284665, 0.5536277679279711, 0.5517682348584284, 0.5499980342423282, 0.5481917091411258, 0.5464523597174271, 0.5447523152346893, 0.5430124456806433, 0.5412929415182034, 0.5396423320184888, 0.537816682500268, 0.5359106683238355, 0.5339584064743336, 0.5320506252246978, 0.5302160273488223, 0.5282804041937914, 0.52632163327309, 0.5243858502966948, 0.5224268457721842, 0.5204435592603028, 0.5184119993680555, 0.5161947907128392, 0.5139697464388808, 0.5117991894707523, 0.5095250449811963, 0.5068215792421971]\n",
      "[0.68276764 0.66616158 0.65611519 0.6474913  0.64030986 0.63513074\n",
      " 0.63062812 0.62710052 0.6232296  0.61987923 0.61672052 0.61397026\n",
      " 0.61002366 0.60659384 0.60338756 0.6005615  0.59789616 0.59533862\n",
      " 0.59279002 0.59047919 0.58736614 0.58440898 0.58165167 0.57908159\n",
      " 0.5767169  0.57419287 0.5718157  0.56957518 0.56747286 0.56538202\n",
      " 0.56326604 0.56117755 0.55920351 0.55733929 0.55545913 0.55362777\n",
      " 0.55176823 0.54999803 0.54819171 0.54645236 0.54475232 0.54301245\n",
      " 0.54129294 0.53964233 0.53781668 0.53591067 0.53395841 0.53205063\n",
      " 0.53021603 0.5282804  0.52632163 0.52438585 0.52242685 0.52044356\n",
      " 0.518412   0.51619479 0.51396975 0.51179919 0.50952504 0.50682158] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## compute a random baseline\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    H_rand = H.uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:06<00:00, 27.19it/s]\n"
     ]
    }
   ],
   "source": [
    "### trained model dir\n",
    "model_dir = './checkpoints/resnet50_224/seed4'\n",
    "\n",
    "# Load the model \n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "state_dict = checkpoint['state_dict']\n",
    "net.load_state_dict(state_dict)\n",
    "\n",
    "net.to(device)\n",
    "net.eval()\n",
    "\n",
    "vector_dict = {}\n",
    "for (x, _, img_index, _) in tqdm((validate_loader)):\n",
    "    x = x.to(device)\n",
    "    _, _, out = net(x)\n",
    "    out = out.detach().cpu()\n",
    "    for k,v in zip(img_index, out):\n",
    "        vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed4_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "seed4_model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed4_model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed4_model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 5 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho_oa/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'seed4/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = r.split('/')[-1]\n",
    "    name = resolution + \"_\" + name\n",
    "    print(name)\n",
    "    i = 0\n",
    "    while os.path.isfile(r +f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name+\"_r\"+str(i))\n",
    "        logs.append((name+\"_r\" +str(i), json.load(open(r +f\"/eval_results{i}.json\"))))\n",
    "        i+=1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho_oa/checkpoint-checkpoints/seed4/model_bestacc.pth_attrib-grad_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6541075717733056 0.6152312289277233 0.5856710854568432\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/seed4/model_bestacc.pth_attrib-ig_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6810415116691911 0.6594326260175901 0.6265905832764171\n",
      "\n",
      "\n",
      "post_ortho_oa/checkpoint-checkpoints/seed4/model_bestacc.pth_attrib-sg_disj-False_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6543258309108969 0.6275420399995009 0.5916536559058\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    result_list = [corr_v[best_match_idx[(-1)*N:],0].mean() for N in range(1,61)]\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.8066725381561981, 0.8047227864289791, 0.7396506642087367, 0.7066635481488853, 0.6864190029998514, 0.6691745892204913, 0.6561696878835682, 0.6464029726455401, 0.626123754518905, 0.6084609247900431, 0.5928654872794762, 0.5789144183149267, 0.5664298127375809, 0.5551727223009556, 0.5450699462813289, 0.5356517822272704, 0.5259646715409819, 0.5172854218510805, 0.5085820405524105, 0.5006438170683682, 0.49309048644930265, 0.48621842769591167, 0.47994255051598467, 0.474169803513961, 0.46878910102534804, 0.463807973043098, 0.4588469025117433, 0.454086692694008, 0.4496379041461711, 0.4447002063365352, 0.43958497952182685, 0.4347674594601062, 0.4301853477420289, 0.42575905377430834, 0.4215297147822198, 0.4174676685854145, 0.41351656867758496, 0.40971203034912757, 0.4060601954059989, 0.40256831224431194, 0.39915074002493484, 0.3958784584514986, 0.39275820103851344, 0.389756770946131, 0.3868730916163004, 0.3840854840109703, 0.3813617722609131, 0.37870551796098795, 0.3760407141580947, 0.37317996305147444, 0.3702922446472296, 0.3674902344428362, 0.36468117886492435, 0.3618767897743031, 0.35916243158252503, 0.35640974659215907, 0.35372187199872684, 0.3509301204410809, 0.34803709367120417, 0.3452349337295754]\n"
     ]
    }
   ],
   "source": [
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.5547639081562986, 0.5158827181219763, 0.5016666432513867, 0.4899518052378893, 0.4812971818059298, 0.46987719046790516, 0.45886351194270514, 0.450162442872088, 0.44338272244350985, 0.43795642203332064, 0.4330356781474957, 0.4283326269674037, 0.42432698510192357, 0.42085840204900193, 0.4173845877622735, 0.4140143039311277, 0.4110383555612783, 0.4083180581016592, 0.4058393865131436, 0.4034608685013922, 0.40127937049551426, 0.3992940263275255, 0.39717733846501774, 0.3951962982192814, 0.39331824262508097, 0.39154678920803465, 0.38986422438144097, 0.3882340972463921, 0.38669686813961884, 0.3846654780020232, 0.38274391113343015, 0.3808788892799332, 0.3790391416673443, 0.37713615883041673, 0.37525203894862336, 0.3734676351448658, 0.37169333089786694, 0.3699592964982936, 0.3682052806039081, 0.3664398596757009, 0.3647440466251403, 0.36306858797335506, 0.36146278298809287, 0.35974151602823634, 0.3580546930559834, 0.3563868960576343, 0.3547494331924217, 0.3529835534276175, 0.3512759746355161, 0.34962927815720213, 0.3480006670952849, 0.34626324793652064, 0.34456037796107775, 0.34291441174505083, 0.3413124745422605, 0.33974744860576617, 0.33809882788558965, 0.33634289933882827, 0.3341643713953843, 0.33194859494960977]\n"
     ]
    }
   ],
   "source": [
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "# print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[0.6344513119446099, 0.6230767086146516, 0.6143283961298108, 0.60866010017291, 0.6024338243329082, 0.5938700333154526, 0.5866305718897721, 0.580809180209458, 0.5759182100471868, 0.5717225340714538, 0.5681702204526363, 0.5651062182650138, 0.5624098278118401, 0.5596366013974174, 0.5569588424625194, 0.5543457230095254, 0.5517987108926288, 0.5494663430581193, 0.5473274252648684, 0.5453614636990018, 0.5433703753953731, 0.541062140587536, 0.538818092358282, 0.5367094135995353, 0.5347051548836207, 0.5328431367388149, 0.5310038498741704, 0.5288981456293814, 0.5269149003560201, 0.5249602536157831, 0.5229095940248308, 0.5209010211622019, 0.5189457608528951, 0.5170062364633914, 0.515146875874146, 0.5132946598455042, 0.5115197152213892, 0.5098184034979175, 0.5080722781133421, 0.5063555947206966, 0.5044752582427641, 0.502679896341068, 0.5009142415385305, 0.4991902466867044, 0.49744409407111184, 0.49575858838339965, 0.4940674115105142, 0.49214966351718453, 0.49019460527408876, 0.4882894203789062, 0.48630508432958447, 0.4843174750388016, 0.48225582428080954, 0.4802315962858721, 0.47814969928116086, 0.47610618103683866, 0.47410335378418883, 0.47213391969760443, 0.4699951841738138, 0.4677492876944347]\n",
      "[0.63445131 0.62307671 0.6143284  0.6086601  0.60243382 0.59387003\n",
      " 0.58663057 0.58080918 0.57591821 0.57172253 0.56817022 0.56510622\n",
      " 0.56240983 0.5596366  0.55695884 0.55434572 0.55179871 0.54946634\n",
      " 0.54732743 0.54536146 0.54337038 0.54106214 0.53881809 0.53670941\n",
      " 0.53470515 0.53284314 0.53100385 0.52889815 0.5269149  0.52496025\n",
      " 0.52290959 0.52090102 0.51894576 0.51700624 0.51514688 0.51329466\n",
      " 0.51151972 0.5098184  0.50807228 0.50635559 0.50447526 0.5026799\n",
      " 0.50091424 0.49919025 0.49744409 0.49575859 0.49406741 0.49214966\n",
      " 0.49019461 0.48828942 0.48630508 0.48431748 0.48225582 0.4802316\n",
      " 0.4781497  0.47610618 0.47410335 0.47213392 0.46999518 0.46774929] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## compute a random baseline\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    # Draw the random baseline into a fresh tensor. The previous code called\n",
    "    # H.uniform_(-1, 1), which mutates H in place (trailing-underscore torch\n",
    "    # ops are in-place), silently replacing the loaded intermediate\n",
    "    # directions with noise for any later cell that reuses H.\n",
    "    H_rand = torch.empty_like(H).uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### summary for five seeds"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "np_grad = np.asarray(grad)\n",
    "print(\"grad: \", np.mean(np_grad,axis=0), np.std(np_grad, axis=0))\n",
    "print('\\n')\n",
    "np_ig = np.asarray(ig)\n",
    "print(\"ig: \", np.mean(np_ig,axis=0), np.std(np_ig, axis=0))\n",
    "print('\\n')\n",
    "np_sg = np.asarray(sg)\n",
    "print(\"sg: \", np.mean(np_sg,axis=0), np.std(np_sg, axis=0))\n",
    "print('\\n')\n",
    "np_rand = np.asarray(rand)\n",
    "print(\"rand: \", np.mean(np_rand,axis=0), np.std(np_rand, axis=0))\n",
    "print('\\n')\n",
    "np_pca = np.asarray(pca)\n",
    "print(\"pca: \", np.mean(np_pca,axis=0), np.std(np_pca, axis=0))\n",
    "print('\\n')\n",
    "np_ica = np.asarray(ica)\n",
    "print(\"ica: \", np.mean(np_ica,axis=0), np.std(np_ica, axis=0))\n",
    "print('\\n')\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "rand:  0.6766171143618532 0.03351377488200076\n",
      "rand:  0.6106461948166002 0.025764474689767676\n",
      "rand:  0.5770887303958434 0.01830450073236347\n",
      "rand:  0.553435377497078 0.01524887880740729\n",
      "rand:  0.5335143302182411 0.014840570533237849\n",
      "rand:  0.5152721644412395 0.014807356089604894\n",
      "rand:  0.4945897723808061 0.014265684841955645\n"
     ]
    }
   ],
   "source": [
    "for k in [0, 9, 19, 29, 39, 49, 59]:\n",
    "    np_rand = np.asarray(rand)\n",
    "    print(\"rand: \", np.mean(np_rand,axis=0)[k], np.std(np_rand, axis=0)[k])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "grad:  0.6566857836430341 0.025227638653641887\n",
      "\n",
      "\n",
      "ig:  0.7048006517538609 0.043646996385036094\n",
      "\n",
      "\n",
      "sg:  0.6529584999922041 0.026374049951387576\n",
      "\n",
      "\n",
      "rand:  0.6766171143618532 0.03351377488200076\n",
      "\n",
      "\n",
      "pca:  0.7890936354057254 0.023855923474116608\n",
      "\n",
      "\n",
      "ica:  0.5093636537630892 0.031324494344476365\n",
      "\n",
      "\n",
      "9\n",
      "grad:  0.6007008232139593 0.009334686673510926\n",
      "\n",
      "\n",
      "ig:  0.6128853262498324 0.015043398007702125\n",
      "\n",
      "\n",
      "sg:  0.5987308258664203 0.010491921146340998\n",
      "\n",
      "\n",
      "rand:  0.6106461948166002 0.025764474689767676\n",
      "\n",
      "\n",
      "pca:  0.601651636254651 0.007402452484247177\n",
      "\n",
      "\n",
      "ica:  0.4413905866951202 0.006715657259945714\n",
      "\n",
      "\n",
      "19\n",
      "grad:  0.563650776351572 0.008577465280652594\n",
      "\n",
      "\n",
      "ig:  0.5691725365535384 0.011576429590849924\n",
      "\n",
      "\n",
      "sg:  0.5626377745329079 0.007047259193862447\n",
      "\n",
      "\n",
      "rand:  0.5770887303958434 0.01830450073236347\n",
      "\n",
      "\n",
      "pca:  0.49657191004076645 0.004933594195955331\n",
      "\n",
      "\n",
      "ica:  0.41025017014415477 0.00669805935836392\n",
      "\n",
      "\n",
      "29\n",
      "grad:  0.5345301950244883 0.008022401328864413\n",
      "\n",
      "\n",
      "ig:  0.541172117835252 0.011056943067446243\n",
      "\n",
      "\n",
      "sg:  0.5362566322314839 0.008174519905079341\n",
      "\n",
      "\n",
      "rand:  0.553435377497078 0.01524887880740729\n",
      "\n",
      "\n",
      "pca:  0.4400778688584398 0.005633490610756235\n",
      "\n",
      "\n",
      "ica:  0.38878763939848576 0.007031353489608004\n",
      "\n",
      "\n",
      "39\n",
      "grad:  0.5101731846586087 0.00932775918742891\n",
      "\n",
      "\n",
      "ig:  0.5167577619575572 0.009823960268412988\n",
      "\n",
      "\n",
      "sg:  0.5139709148260858 0.010161786092682673\n",
      "\n",
      "\n",
      "rand:  0.5335143302182411 0.014840570533237849\n",
      "\n",
      "\n",
      "pca:  0.40158673628600994 0.004102751044421606\n",
      "\n",
      "\n",
      "ica:  0.3696383340665005 0.007057093713470129\n",
      "\n",
      "\n",
      "49\n",
      "grad:  0.48786063849054945 0.010971937311770704\n",
      "\n",
      "\n",
      "ig:  0.4933605661079592 0.008343893043726304\n",
      "\n",
      "\n",
      "sg:  0.49330333646294927 0.01031186144004624\n",
      "\n",
      "\n",
      "rand:  0.5152721644412395 0.014807356089604894\n",
      "\n",
      "\n",
      "pca:  0.37162235204232985 0.003878243149487148\n",
      "\n",
      "\n",
      "ica:  0.3527968572795382 0.0070452610033641914\n",
      "\n",
      "\n",
      "59\n",
      "grad:  0.4630343728766847 0.01192911076824765\n",
      "\n",
      "\n",
      "ig:  0.46786934887736853 0.005606358197698934\n",
      "\n",
      "\n",
      "sg:  0.4685702232044108 0.006551074002330336\n",
      "\n",
      "\n",
      "rand:  0.4945897723808061 0.014265684841955645\n",
      "\n",
      "\n",
      "pca:  0.34588221606891556 0.0042982475283123486\n",
      "\n",
      "\n",
      "ica:  0.3354970226440942 0.006715076805421829\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for k in [0, 9, 19, 29, 39, 49, 59]:\n",
    "    print(k)\n",
    "    np_grad = np.asarray(grad)\n",
    "    print(\"grad: \", np.mean(np_grad,axis=0)[k], np.std(np_grad, axis=0)[k])\n",
    "    print('\\n')\n",
    "    np_ig = np.asarray(ig)\n",
    "    print(\"ig: \", np.mean(np_ig,axis=0)[k], np.std(np_ig, axis=0)[k])\n",
    "    print('\\n')\n",
    "    np_sg = np.asarray(sg)\n",
    "    print(\"sg: \", np.mean(np_sg,axis=0)[k], np.std(np_sg, axis=0)[k])\n",
    "    print('\\n')\n",
    "    np_rand = np.asarray(rand)\n",
    "    print(\"rand: \", np.mean(np_rand,axis=0)[k], np.std(np_rand, axis=0)[k])\n",
    "    print('\\n')\n",
    "    np_pca = np.asarray(pca)\n",
    "    print(\"pca: \", np.mean(np_pca,axis=0)[k], np.std(np_pca, axis=0)[k])\n",
    "    print('\\n')\n",
    "    np_ica = np.asarray(ica)\n",
    "    print(\"ica: \", np.mean(np_ica,axis=0)[k], np.std(np_ica, axis=0)[k])\n",
    "    print('\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "metadata": {},
   "outputs": [],
   "source": [
    "### get the best out of ours \n",
    "all_ours = np.stack([np_grad, np_ig, np_sg], axis=1) #dim=(seed, [ig,sg,grad], 20)\n",
    "best_ours = []\n",
    "for i in range(5):\n",
    "    best_ours.append(all_ours[i,np.argmax(all_ours[i,:,0]),:])\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 121,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "best ours:  0.7122433312545944 0.03506706845965369\n",
      "9\n",
      "best ours:  0.618518954245866 0.00760929564340516\n",
      "19\n",
      "best ours:  0.5724571448420483 0.0067674466178150585\n",
      "29\n",
      "best ours:  0.5426496549156116 0.008620413073767432\n",
      "39\n",
      "best ours:  0.5161933277339822 0.01064129017195091\n",
      "49\n",
      "best ours:  0.4915316223974996 0.010834664393829409\n",
      "59\n",
      "best ours:  0.4649791943209327 0.010060948438614182\n"
     ]
    }
   ],
   "source": [
    "for k in [0, 9, 19, 29, 39, 49, 59]:\n",
    "    print(k)\n",
    "    np_best_ours = np.asarray(best_ours)\n",
    "    print(\"best ours: \", np.mean(np_best_ours,axis=0)[k], np.std(np_best_ours, axis=0)[k])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 122,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "our classifier:  77.46600000000001 0.374945329348158\n"
     ]
    }
   ],
   "source": [
    "### calculate classifier acc\n",
    "acc = np.array([77.53, 77.12, 78.07, 77.02, 77.59])\n",
    "print(\"our classifier: \", np.mean(acc), np.std(acc))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Model_Wrapper(\n",
       "  (features): ResNet(\n",
       "    (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n",
       "    (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (relu): ReLU(inplace=True)\n",
       "    (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
       "    (layer1): Sequential(\n",
       "      (0): Bottleneck(\n",
       "        (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): Bottleneck(\n",
       "        (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Bottleneck(\n",
       "        (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (layer2): Sequential(\n",
       "      (0): Bottleneck(\n",
       "        (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): Bottleneck(\n",
       "        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Bottleneck(\n",
       "        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (3): Bottleneck(\n",
       "        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (layer3): Sequential(\n",
       "      (0): Bottleneck(\n",
       "        (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (3): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (4): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (5): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (layer4): Sequential(\n",
       "      (0): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): Bottleneck(\n",
       "        (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Bottleneck(\n",
       "        (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n",
       "    (dropout): Dropout(p=0.5, inplace=False)\n",
       "    (fc): Linear(in_features=2048, out_features=1000, bias=True)\n",
       "  )\n",
       "  (fc1): Linear(in_features=2048, out_features=512, bias=True)\n",
       "  (fc2): Linear(in_features=512, out_features=200, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "### trained model dir\n",
    "model_dir = './checkpoints/resnet50_224'\n",
    "# Initialize model\n",
    "start_epoch = 0\n",
    "net = Model_Wrapper(num_classes=num_classes, M=config.num_attentions, net=config.net, pretrained=True, pth_path=config.pretrain_path)\n",
    "\n",
    "# Load the model \n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "state_dict = checkpoint['state_dict']\n",
    "net.load_state_dict(state_dict)\n",
    "\n",
    "net.to(device)\n",
    "net.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:10<00:00, 17.88it/s]\n"
     ]
    }
   ],
   "source": [
    "vector_dict = {}\n",
    "for (x, _, img_index, _) in tqdm((validate_loader)):\n",
    "    x = x.to(device)\n",
    "    _, _, out = net(x)\n",
    "    out = out.detach().cpu()\n",
    "    for k,v in zip(img_index, out):\n",
    "        vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "224_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "224_model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "224_model_bestacc.pth_attrib-gradcam_disj-True_px-False_norm-False_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "224_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "224_model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "224_model_bestacc.pth_attrib-gradcam_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 6 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho4/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'resnet50_224/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = r.split('/')[-1]\n",
    "    name = resolution + \"_\" + name\n",
    "    print(name)\n",
    "    i = 0\n",
    "    while os.path.isfile(r +f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name+\"_r\"+str(i))\n",
    "        logs.append((name+\"_r\" +str(i), json.load(open(r +f\"/eval_results{i}.json\"))))\n",
    "        i+=1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Ours"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho4/checkpoint-checkpoints/resnet50_224/model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6836340166602618 0.6303368275877607 0.5978917560278226\n",
      "\n",
      "\n",
      "post_ortho4/checkpoint-checkpoints/resnet50_224/model_bestacc.pth_attrib-gradcam_disj-True_px-False_norm-False_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "post_ortho4/checkpoint-checkpoints/resnet50_224/model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.763338070896982 0.676910180568088 0.6259480095452135\n",
      "\n",
      "\n",
      "post_ortho4/checkpoint-checkpoints/resnet50_224/model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.707772302784336 0.674180933528824 0.6561640087679634\n",
      "\n",
      "\n",
      "post_ortho4/checkpoint-checkpoints/resnet50_224/model_bestacc.pth_attrib-gradcam_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n"
     ]
    }
   ],
   "source": [
    "for H_index in range(1,6):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    result_list = [corr_v[best_match_idx[(-1)*N:],0].mean() for N in range(1,61)]\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### PCA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.7429218420203433, 0.7402664719307781, 0.7093517894773727, 0.6830565029697954, 0.6577320675641152, 0.6375110265671289, 0.6221540261114047, 0.6098934826536111, 0.5988948523009813, 0.5875547422099597, 0.5729722100262313, 0.559966949335359, 0.5488833498135584, 0.5381326361590446, 0.5284921658923135, 0.5199615203887511, 0.5110681906916974, 0.5029292486984581, 0.4955896668162947, 0.48890497952265183, 0.4825344508841233, 0.47659816871763083, 0.47061102958978124, 0.46496148145898974, 0.4597304048686347, 0.45475576100245585, 0.44949257330170794, 0.4445492593832682, 0.43989822111346233, 0.4355491634119871, 0.4314513592079328, 0.42743659769986897, 0.4236639994272963, 0.4200660969643583, 0.41662069097158605, 0.41298123895314265, 0.4094840670621725, 0.40611989209785565, 0.4028661720537494, 0.3997478656963045, 0.3967487704902653, 0.3938458391016977, 0.39106664027160964, 0.3884111915634509, 0.38575508431205174, 0.38318104240269263, 0.3805486848001076, 0.3777991252809083, 0.37515126385048275, 0.3725847491185786, 0.3701159353827344, 0.36760413029015704, 0.3650800771600968, 0.3626457662482185, 0.360288631040032, 0.35796606249841084, 0.3557040234076607, 0.353494772728437, 0.3513193540385411, 0.34891341761686334]\n"
     ]
    }
   ],
   "source": [
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### ICA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.4770955859105779, 0.47671398975774526, 0.47207935388904615, 0.4668575005054261, 0.4629953598619741, 0.4593673771305289, 0.4547390108905672, 0.4511026998085874, 0.44794889028114176, 0.44519861263619775, 0.44255383088532657, 0.43846149457867756, 0.4343366619360711, 0.43018724136536785, 0.4265083232165402, 0.422497759528033, 0.41892730979080356, 0.41558368264113843, 0.4124072938939027, 0.40947020132549217, 0.4067990360066328, 0.40413526916155934, 0.40149642991193135, 0.39890807554292884, 0.39638071465193553, 0.39401332700597613, 0.39177727363427345, 0.389547375277797, 0.3874020579914299, 0.38529844250372813, 0.3832333012822151, 0.381252857907877, 0.37938174918404194, 0.37728751195245464, 0.37529900443438385, 0.37334803381517856, 0.3712879483777796, 0.3691888728135288, 0.36716919197885084, 0.3652464881381454, 0.36338445504532235, 0.36153520138388284, 0.3597390187794492, 0.35801132935865465, 0.35624691661980395, 0.3545546141835264, 0.35285283879485263, 0.3510777722437344, 0.34927738196294716, 0.3474605039816639, 0.3457065805880842, 0.34400749769796046, 0.34221996920690756, 0.34049705394202845, 0.3388187856442191, 0.3371441944983591, 0.3355004487759217, 0.33381544527742585, 0.33212597207094213, 0.33035073676649496]\n"
     ]
    }
   ],
   "source": [
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "0.6721584884268643 0.656719640744858 0.6302930377649174\n",
      "[0.67215849 0.66810199 0.66646192 0.66190364 0.65671964 0.65161917\n",
      " 0.64540302 0.63912099 0.63422374 0.63029304 0.62681811 0.62384758\n",
      " 0.6210669  0.61803756 0.61505492 0.61148077 0.60817526 0.60504017\n",
      " 0.60197117 0.5990306  0.59596368 0.59228026 0.58859246 0.58520411\n",
      " 0.58186228 0.57843991 0.57523211 0.57223191 0.56938521 0.56659099\n",
      " 0.56395422 0.56138654 0.55884028 0.55643245 0.55408401 0.55159385\n",
      " 0.54918493 0.54684062 0.54458626 0.54231369 0.54010347 0.53793169\n",
      " 0.53574693 0.53343182 0.53120448 0.52900289 0.52673988 0.52456943\n",
      " 0.52225785 0.51989755 0.51748425 0.51512044 0.51255147 0.51007337\n",
      " 0.50736667 0.50470199 0.50202131 0.49941873 0.49634243 0.49334213] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## computate a random baseline\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    H_rand = H.uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "np_result.shape\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Model_Wrapper(\n",
       "  (features): ResNet(\n",
       "    (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n",
       "    (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (relu): ReLU(inplace=True)\n",
       "    (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
       "    (layer1): Sequential(\n",
       "      (0): Bottleneck(\n",
       "        (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): Bottleneck(\n",
       "        (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Bottleneck(\n",
       "        (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (layer2): Sequential(\n",
       "      (0): Bottleneck(\n",
       "        (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): Bottleneck(\n",
       "        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Bottleneck(\n",
       "        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (3): Bottleneck(\n",
       "        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (layer3): Sequential(\n",
       "      (0): Bottleneck(\n",
       "        (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (3): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (4): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (5): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (layer4): Sequential(\n",
       "      (0): Bottleneck(\n",
       "        (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): Bottleneck(\n",
       "        (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Bottleneck(\n",
       "        (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (relu): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n",
       "    (dropout): Dropout(p=0.5, inplace=False)\n",
       "    (fc): Linear(in_features=2048, out_features=1000, bias=True)\n",
       "  )\n",
       "  (fc1): Linear(in_features=2048, out_features=512, bias=True)\n",
       "  (fc2): Linear(in_features=512, out_features=200, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "### trained model dir\n",
    "model_dir = './checkpoints/resnet50_224/seed1'\n",
    "\n",
    "# Load the model \n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "state_dict = checkpoint['state_dict']\n",
    "net.load_state_dict(state_dict)\n",
    "\n",
    "net.to(device)\n",
    "net.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:07<00:00, 25.29it/s]\n"
     ]
    }
   ],
   "source": [
    "vector_dict = {}\n",
    "for (x, _, img_index, _) in tqdm((validate_loader)):\n",
    "    x = x.to(device)\n",
    "    _, _, out = net(x)\n",
    "    out = out.detach().cpu()\n",
    "    for k,v in zip(img_index, out):\n",
    "        vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed1_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "seed1_model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed1_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed1_model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 4 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho6/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'seed1/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = r.split('/')[-1]\n",
    "    name = resolution + \"_\" + name\n",
    "    print(name)\n",
    "    i = 0\n",
    "    while os.path.isfile(r +f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name+\"_r\"+str(i))\n",
    "        logs.append((name+\"_r\" +str(i), json.load(open(r +f\"/eval_results{i}.json\"))))\n",
    "        i+=1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho6/checkpoint-checkpoints/seed1/model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6742840561795806 0.6533243822594558 0.6355506060050556\n",
      "\n",
      "\n",
      "post_ortho6/checkpoint-checkpoints/seed1/model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6568997784336977 0.6235829353388407 0.6006605208754748\n",
      "\n",
      "\n",
      "post_ortho6/checkpoint-checkpoints/seed1/model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.7075546221169465 0.6820532636936028 0.64964756290976\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    result_list = [corr_v[best_match_idx[(-1)*N:],0].mean() for N in range(1,61)]\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.7893771815904295, 0.7888188086309602, 0.7595429757128512, 0.7378706726472032, 0.7085431095966646, 0.6872195812116256, 0.6595097341778817, 0.6384887904178647, 0.6217259482005457, 0.603736683095617, 0.5885942701375941, 0.5746582514261578, 0.5619619129766334, 0.5492080996663053, 0.5380852508466902, 0.5275291423436117, 0.51775774590116, 0.5089307880692437, 0.5007490168938502, 0.4933251574291909, 0.4861262744673804, 0.4793870461396468, 0.4731618838135866, 0.4671391570212912, 0.461113155700162, 0.4553955498223243, 0.45005908008028533, 0.44491352437066173, 0.4400853176753915, 0.43551764525375547, 0.43123306013334484, 0.42719418419200084, 0.423197248936759, 0.41942227418363504, 0.41576346653014107, 0.4121474232963811, 0.40848877047778664, 0.404987838140312, 0.4015951137887596, 0.3981039466945498, 0.3944612982473274, 0.39090583127065465, 0.38741531331431606, 0.3840653573903101, 0.38080597645496145, 0.37760286858036607, 0.3744109964304059, 0.37133374229785127, 0.36837755248229975, 0.3655272101537944, 0.36269783598035593, 0.3599306650952266, 0.35726679691020596, 0.35455923514097976, 0.35192983535978334, 0.34933959388995994, 0.34673361753342646, 0.3441337438261656, 0.3415892690326851, 0.3384880089232187]\n"
     ]
    }
   ],
   "source": [
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.5351978014402716, 0.5003983099478846, 0.48424697297343394, 0.4748619613776406, 0.4687499638199532, 0.4636966098868358, 0.45986254157616485, 0.45687806810826315, 0.4541481093294954, 0.4511598172833378, 0.4470856182082205, 0.4433518425206791, 0.4398361406722205, 0.43680864512931356, 0.43413980093223997, 0.43164959405818226, 0.4293177284814183, 0.4272125852187404, 0.42488613747600656, 0.42265438326205923, 0.4204854035952281, 0.4184395996771769, 0.4163984081631694, 0.414399018611367, 0.41252024081026034, 0.41072345882458505, 0.4090214886973445, 0.40709513698516947, 0.40495876341196363, 0.4027448173335735, 0.40066745795641256, 0.3986998519326688, 0.3966452819660794, 0.3945243962715209, 0.39242192131920256, 0.3904279004460834, 0.38841467001527513, 0.3865005892402508, 0.3846347689571873, 0.3828545778761453, 0.3811002389709501, 0.37941606879437934, 0.37776654494354706, 0.37611910982701624, 0.37437118122017127, 0.37251381859413685, 0.3707254455355097, 0.36894981172912295, 0.3671597546541119, 0.3653806409424433, 0.3636590498916096, 0.36173587886167424, 0.35987893372008173, 0.35805210099337353, 0.35626366828913353, 0.35453023632716796, 0.3525599674315569, 0.35064555477376175, 0.34878220396572773, 0.3467512055443608]\n"
     ]
    }
   ],
   "source": [
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "# print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[0.662751344253738, 0.6622359121463325, 0.6580239988117806, 0.6543878153931284, 0.6505900779340725, 0.6424696257650623, 0.633129141761559, 0.6256690718895794, 0.6193786296700703, 0.6128620374861615, 0.6065385240771642, 0.6007929578194862, 0.5958203865646146, 0.5914957681831339, 0.5874675467895455, 0.5837663368811115, 0.580238604522287, 0.5769000592446379, 0.5738596456164613, 0.5706151087423648, 0.5675061307870876, 0.5645842564985215, 0.5615513277237609, 0.5587377046629523, 0.5560418630739288, 0.5533941201033704, 0.5507959375220405, 0.5483783078911549, 0.5461060139173706, 0.543960247669937, 0.5417129241719775, 0.5395408279132872, 0.5374389928751282, 0.5354335622436205, 0.5335324696964691, 0.5316332329651462, 0.5298324006252065, 0.5280442308432184, 0.5261474244615463, 0.5242722857068995, 0.5224782509402562, 0.5207041554204677, 0.5189964246783927, 0.517283461575698, 0.5156384427634072, 0.514061270295896, 0.5124043606406822, 0.5107868136304353, 0.5090675871220837, 0.5073793310786899, 0.5056280662053143, 0.5039229432050958, 0.5021687494776356, 0.5004359634304293, 0.49867895319570865, 0.496823155037109, 0.49490902416471777, 0.49303296437053573, 0.4905271337047446, 0.48756817221689386]\n",
      "[0.66275134 0.66223591 0.658024   0.65438782 0.65059008 0.64246963\n",
      " 0.63312914 0.62566907 0.61937863 0.61286204 0.60653852 0.60079296\n",
      " 0.59582039 0.59149577 0.58746755 0.58376634 0.5802386  0.57690006\n",
      " 0.57385965 0.57061511 0.56750613 0.56458426 0.56155133 0.5587377\n",
      " 0.55604186 0.55339412 0.55079594 0.54837831 0.54610601 0.54396025\n",
      " 0.54171292 0.53954083 0.53743899 0.53543356 0.53353247 0.53163323\n",
      " 0.5298324  0.52804423 0.52614742 0.52427229 0.52247825 0.52070416\n",
      " 0.51899642 0.51728346 0.51563844 0.51406127 0.51240436 0.51078681\n",
      " 0.50906759 0.50737933 0.50562807 0.50392294 0.50216875 0.50043596\n",
      " 0.49867895 0.49682316 0.49490902 0.49303296 0.49052713 0.48756817] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## compute a random baseline\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    H_rand = H.uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "np_result.shape\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:29<00:00,  6.13it/s]\n"
     ]
    }
   ],
   "source": [
    "### trained model dir\n",
    "model_dir = './checkpoints/resnet50_224/seed2'\n",
    "\n",
    "# Load the model \n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "state_dict = checkpoint['state_dict']\n",
    "net.load_state_dict(state_dict)\n",
    "\n",
    "net.to(device)\n",
    "net.eval()\n",
    "\n",
    "vector_dict = {}\n",
    "for (x, _, img_index, _) in tqdm((validate_loader)):\n",
    "    x = x.to(device)\n",
    "    _, _, out = net(x)\n",
    "    out = out.detach().cpu()\n",
    "    for k,v in zip(img_index, out):\n",
    "        vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed2_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "seed2_model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed2_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed2_model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 4 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho6/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'seed2/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = r.split('/')[-1]\n",
    "    name = resolution + \"_\" + name\n",
    "    print(name)\n",
    "    i = 0\n",
    "    while os.path.isfile(r +f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name+\"_r\"+str(i))\n",
    "        logs.append((name+\"_r\" +str(i), json.load(open(r +f\"/eval_results{i}.json\"))))\n",
    "        i+=1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho6/checkpoint-checkpoints/seed2/model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.655851875004431 0.6384606547134559 0.6234585925261035\n",
      "\n",
      "\n",
      "post_ortho6/checkpoint-checkpoints/seed2/model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.7473466530855235 0.7140861959154525 0.6825070709488281\n",
      "\n",
      "\n",
      "post_ortho6/checkpoint-checkpoints/seed2/model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.7260713231331978 0.6993135039312538 0.6539256414685786\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    result_list = [corr_v[best_match_idx[(-1)*N:],0].mean() for N in range(1,61)]\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.8018548931837765, 0.7979329040517569, 0.7471602487010784, 0.7200157399011142, 0.7017747144581945, 0.6884212762076397, 0.6622226786850888, 0.6413166476817218, 0.6248679340725185, 0.6065801051853572, 0.5908628094284541, 0.5759819787567257, 0.56312248483593, 0.5515318806907351, 0.5405970779173572, 0.5309578550039756, 0.5220228248838139, 0.5134045885052533, 0.5055375578772209, 0.4975041688314654, 0.4896625820204342, 0.4824399170588752, 0.47581755203628384, 0.46906889808242785, 0.46285565393889555, 0.4566887529455602, 0.4509581914191316, 0.44557184127217325, 0.4404878776616435, 0.4357383785199319, 0.43128075108344666, 0.4270806972276536, 0.4231092886004758, 0.41918302131800844, 0.4154079269428136, 0.411813409406051, 0.40833153417500184, 0.4049774235722982, 0.4015564582531913, 0.3983052721099613, 0.39512876297678373, 0.3920716145905003, 0.38899750446213033, 0.3860077746002046, 0.38310424057833437, 0.38031071592509563, 0.3775425113308252, 0.37480904366315276, 0.37216221313923836, 0.36961971504489044, 0.36701045095335216, 0.3644921069013263, 0.3620367205150021, 0.35953802048153094, 0.35708447461037524, 0.35471233121860174, 0.352381967944689, 0.3501257221314196, 0.3479270849578635, 0.3455647141471939]\n"
     ]
    }
   ],
   "source": [
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.4909430908381139, 0.4892176505900996, 0.4788079016772622, 0.47180948355581026, 0.46620931022440415, 0.4598158762278704, 0.45490264888909004, 0.4508962436456352, 0.44561000068731704, 0.4413158161163624, 0.43780162045405446, 0.4344833270374913, 0.4313168715061076, 0.4283803412487826, 0.42579083728094597, 0.4234147557168838, 0.421091030300053, 0.41862575108293504, 0.41567892872823214, 0.41282156436872874, 0.4092074908685924, 0.405881540311654, 0.4027916729717423, 0.39992448813059694, 0.3972354261697349, 0.39458683077043627, 0.39195210733780955, 0.38945859614883516, 0.3869468697797744, 0.3845758885545248, 0.3820872826893961, 0.3797299615424408, 0.37740333297757517, 0.37514514258203424, 0.3729436568287704, 0.3706637274451977, 0.3685021558470912, 0.3663796347427362, 0.3643587506066351, 0.36242712196292326, 0.36055001824060373, 0.3586538294472253, 0.35681418730281644, 0.35500359149737154, 0.35326151761871544, 0.3515854105248191, 0.3499116266213677, 0.348279832605257, 0.34669534604556435, 0.3451432615577536, 0.3436407055651025, 0.3421061921314215, 0.34062024463178675, 0.33901882764464264, 0.3374429845473292, 0.335901921368753, 0.33441122717442745, 0.33274950256425656, 0.33106497577958516, 0.32877278181925235]\n"
     ]
    }
   ],
   "source": [
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "# print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[0.6250051889724602, 0.6212421382110829, 0.6184391955037247, 0.6165681505082813, 0.6153860943641727, 0.6119089755817742, 0.6082082663784097, 0.6052703706710197, 0.6013741019049947, 0.5982283451142474, 0.595426689229059, 0.5929641193647907, 0.5906875063203226, 0.5880641101602277, 0.5856738516074191, 0.5835010079776738, 0.5813476537587203, 0.5792868972931297, 0.5774139132916013, 0.575568532348668, 0.5736936822720309, 0.5718151491999224, 0.5700777083199905, 0.5679900734018638, 0.5655832823033718, 0.5632501282877689, 0.5609798969790772, 0.5585611562122655, 0.5558724634982467, 0.5533059617050227, 0.5507896881389535, 0.5483558036453617, 0.5458138102337688, 0.5433750338876083, 0.5410388047876384, 0.5387087374395141, 0.5364987025661401, 0.534366378318932, 0.532072771219821, 0.5298531120701815, 0.5276339020037355, 0.525496864844084, 0.5233663781478682, 0.5212549982896273, 0.5191952419754362, 0.5169922728526405, 0.5148732978530841, 0.5123998789417681, 0.5097849561215126, 0.5071472617786933, 0.504459745053853, 0.5017482830449874, 0.499111568642447, 0.4960743869773009, 0.4931130681186605, 0.4902198108265227, 0.487387300256426, 0.48434753145755965, 0.48140866963454365, 0.4784493232012543]\n",
      "[0.62500519 0.62124214 0.6184392  0.61656815 0.61538609 0.61190898\n",
      " 0.60820827 0.60527037 0.6013741  0.59822835 0.59542669 0.59296412\n",
      " 0.59068751 0.58806411 0.58567385 0.58350101 0.58134765 0.5792869\n",
      " 0.57741391 0.57556853 0.57369368 0.57181515 0.57007771 0.56799007\n",
      " 0.56558328 0.56325013 0.5609799  0.55856116 0.55587246 0.55330596\n",
      " 0.55078969 0.5483558  0.54581381 0.54337503 0.5410388  0.53870874\n",
      " 0.5364987  0.53436638 0.53207277 0.52985311 0.5276339  0.52549686\n",
      " 0.52336638 0.521255   0.51919524 0.51699227 0.5148733  0.51239988\n",
      " 0.50978496 0.50714726 0.50445975 0.50174828 0.49911157 0.49607439\n",
      " 0.49311307 0.49021981 0.4873873  0.48434753 0.48140867 0.47844932] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## compute a random baseline\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    H_rand = H.uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:44<00:00,  4.07it/s]\n"
     ]
    }
   ],
   "source": [
    "### trained model dir\n",
    "model_dir = './checkpoints/resnet50_224/seed3'\n",
    "\n",
    "# Load the model \n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "state_dict = checkpoint['state_dict']\n",
    "net.load_state_dict(state_dict)\n",
    "\n",
    "net.to(device)\n",
    "net.eval()\n",
    "\n",
    "vector_dict = {}\n",
    "for (x, _, img_index, _) in tqdm((validate_loader)):\n",
    "    x = x.to(device)\n",
    "    _, _, out = net(x)\n",
    "    out = out.detach().cpu()\n",
    "    for k,v in zip(img_index, out):\n",
    "        vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed3_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "seed3_model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed3_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed3_model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 4 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho6/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'seed3/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = r.split('/')[-1]\n",
    "    name = resolution + \"_\" + name\n",
    "    print(name)\n",
    "    i = 0\n",
    "    while os.path.isfile(r +f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name+\"_r\"+str(i))\n",
    "        logs.append((name+\"_r\" +str(i), json.load(open(r +f\"/eval_results{i}.json\"))))\n",
    "        i+=1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho6/checkpoint-checkpoints/seed3/model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.7065690269677124 0.688337408094195 0.6628905657561113\n",
      "\n",
      "\n",
      "post_ortho6/checkpoint-checkpoints/seed3/model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6689368040664758 0.6278000895432443 0.6077398317919788\n",
      "\n",
      "\n",
      "post_ortho6/checkpoint-checkpoints/seed3/model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6765625122091818 0.6697823140797725 0.6529208075816599\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    result_list = [corr_v[best_match_idx[(-1)*N:],0].mean() for N in range(1,61)]\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.8046417220778795, 0.8022089266307891, 0.7330315103156891, 0.6970629456658866, 0.6737505669613808, 0.6581300275411481, 0.6443459170391154, 0.6338015660912106, 0.6190454523478379, 0.6019064338432609, 0.5876733235492447, 0.5749555273528372, 0.5638305394077835, 0.5531444954628532, 0.5434193539650567, 0.534329951625554, 0.5262858685562624, 0.5176202615559268, 0.5096660648224058, 0.502481053969147, 0.4959674901503507, 0.4897229938644918, 0.4837631150369475, 0.47813758288279956, 0.47274066147136884, 0.4675227874932789, 0.4625475296562453, 0.4578621213601887, 0.4533314177889672, 0.44887422665986193, 0.4443498851952177, 0.4400577111290787, 0.43583824132443355, 0.4315433465676117, 0.4274642554485843, 0.42356522076831427, 0.41975310361400103, 0.41610889622707514, 0.41263077693145916, 0.4091813146308761, 0.4058991076180629, 0.4023939452173244, 0.3990184534920037, 0.3956853026093636, 0.39244612611080776, 0.38926705197759054, 0.3861794527159336, 0.38308719406621733, 0.3800869269968195, 0.3771928338243896, 0.37440833472529905, 0.37167379653111243, 0.3690255761624835, 0.36646622571587023, 0.36393191531291286, 0.3614708815102275, 0.3588354897167326, 0.35628081294885877, 0.35374151119328273, 0.35121632863582736]\n"
     ]
    }
   ],
   "source": [
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.5183098517892837, 0.49464501763569085, 0.4754428610137891, 0.4651229198127865, 0.4583402681723257, 0.4527506684954415, 0.4487160909186055, 0.44470360257662156, 0.44040709856201754, 0.43679933322076225, 0.43365639365854564, 0.4307515947905378, 0.42813950597262235, 0.4258544917358232, 0.4234117606473984, 0.42066258915917787, 0.4181098024768694, 0.4157848201136545, 0.4136030309966247, 0.4115903237652912, 0.40969038007909286, 0.40785052245028997, 0.4059944597693411, 0.40397703941995095, 0.4021148408894152, 0.39985233152554234, 0.39775283657293353, 0.39560330999883553, 0.3935819695245161, 0.3916565976379991, 0.38973316318035456, 0.3879194970209916, 0.38618130827173963, 0.3845184965857045, 0.3824811126147142, 0.3804723345023008, 0.3785055613301457, 0.37664079599838013, 0.3748295703195568, 0.373089173423586, 0.3714105273698959, 0.3696574367571512, 0.36779178752662034, 0.36598728088192956, 0.364214237661866, 0.3624188225406616, 0.3606585466122967, 0.35891238858986846, 0.35698185627721907, 0.355091376676125, 0.35321314465112613, 0.35135953958226673, 0.34957486874595134, 0.3477120014907321, 0.34588444197235063, 0.3438294510902341, 0.3418309797169422, 0.33981976333306674, 0.3378522820220463, 0.3358985957181065]\n"
     ]
    }
   ],
   "source": [
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "# print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[0.6710639174071473, 0.6662492014839211, 0.6623613997614379, 0.6586310015118251, 0.656113876318249, 0.6501960221896732, 0.6444681969531872, 0.6382900278548914, 0.6330566312155602, 0.6286343793447247, 0.6239216821031068, 0.619918255144985, 0.616032822230272, 0.6126232095546406, 0.6091809236809892, 0.606125095834829, 0.6034241206236837, 0.6009052930914601, 0.5985943454086309, 0.5963612632030082, 0.594252429680834, 0.5922280997756731, 0.5903262265150087, 0.5880540994140405, 0.5859569949306416, 0.5839833936342458, 0.5820891966464431, 0.5798578756837365, 0.5777447034173772, 0.5757709734007379, 0.5734793151221395, 0.5712136723372807, 0.5690257797759412, 0.5669015927968191, 0.5648804151556918, 0.5627209507964447, 0.5605952418250048, 0.5583002917490506, 0.5560566070407238, 0.553866406476093, 0.5515327432850122, 0.549297354804756, 0.5470086854049933, 0.5448136349853411, 0.5426813488655521, 0.5405755303253776, 0.538373654049556, 0.536172520853628, 0.5340181293454166, 0.5318859336549536, 0.5297819860746561, 0.5277400926186817, 0.5256323693499962, 0.5233629239413631, 0.5211324559518713, 0.5187210260137591, 0.5163814124095113, 0.5139098053047035, 0.5112819782827172, 0.5085946215549451]\n",
      "[0.67106392 0.6662492  0.6623614  0.658631   0.65611388 0.65019602\n",
      " 0.6444682  0.63829003 0.63305663 0.62863438 0.62392168 0.61991826\n",
      " 0.61603282 0.61262321 0.60918092 0.6061251  0.60342412 0.60090529\n",
      " 0.59859435 0.59636126 0.59425243 0.5922281  0.59032623 0.5880541\n",
      " 0.58595699 0.58398339 0.5820892  0.57985788 0.5777447  0.57577097\n",
      " 0.57347932 0.57121367 0.56902578 0.56690159 0.56488042 0.56272095\n",
      " 0.56059524 0.55830029 0.55605661 0.55386641 0.55153274 0.54929735\n",
      " 0.54700869 0.54481363 0.54268135 0.54057553 0.53837365 0.53617252\n",
      " 0.53401813 0.53188593 0.52978199 0.52774009 0.52563237 0.52336292\n",
      " 0.52113246 0.51872103 0.51638141 0.51390981 0.51128198 0.50859462] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## compute a random baseline\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    H_rand = H.uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model_Wrapper: All params loaded\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 182/182 [00:07<00:00, 25.38it/s]\n"
     ]
    }
   ],
   "source": [
    "### trained model dir\n",
    "model_dir = './checkpoints/resnet50_224/seed4'\n",
    "\n",
    "# Load the model \n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'))\n",
    "state_dict = checkpoint['state_dict']\n",
    "net.load_state_dict(state_dict)\n",
    "\n",
    "net.to(device)\n",
    "net.eval()\n",
    "\n",
    "vector_dict = {}\n",
    "for (x, _, img_index, _) in tqdm((validate_loader)):\n",
    "    x = x.to(device)\n",
    "    _, _, out = net(x)\n",
    "    out = out.detach().cpu()\n",
    "    for k,v in zip(img_index, out):\n",
    "        vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed4_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-16\n",
      "seed4_model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed4_model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "seed4_model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "Found and loaded 16 log files.\n"
     ]
    }
   ],
   "source": [
    "log_dir = 'post_ortho6/checkpoint-checkpoints/'\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'seed4/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = r.split('/')[-1]\n",
    "    name = resolution + \"_\" + name\n",
    "    print(name)\n",
    "    i = 0\n",
    "    while os.path.isfile(r +f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name+\"_r\"+str(i))\n",
    "        logs.append((name+\"_r\" +str(i), json.load(open(r +f\"/eval_results{i}.json\"))))\n",
    "        i+=1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "post_ortho6/checkpoint-checkpoints/seed4/model_bestacc.pth_attrib-sg_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.7085779796918247 0.6980918848153116 0.687127951202971\n",
      "\n",
      "\n",
      "post_ortho6/checkpoint-checkpoints/seed4/model_bestacc.pth_attrib-grad_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.6663950083875064 0.6442356666049298 0.6147843514324841\n",
      "\n",
      "\n",
      "post_ortho6/checkpoint-checkpoints/seed4/model_bestacc.pth_attrib-ig_disj-True_px-False_norm-True_smooth-False_cutoff-0.0_optim-adam_lr-0.001_batchsize-8\n",
      "0.7331779204194921 0.7005294824614431 0.6737332892142734\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Score the learned directions (H) of runs 1..3 against attribute_cls.\n",
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    # gradcam runs are excluded from this comparison\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    # learned concept directions saved by the run\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    # rank concepts by their best attribute correlation\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    # NOTE(review): this averages column 0 of corr_v, while the pca/ica/random\n",
    "    # baseline cells average corr_v.max(axis=1) -- confirm the asymmetry is intended.\n",
    "    result_list = [corr_v[best_match_idx[(-1)*N:],0].mean() for N in range(1,61)]\n",
    "    # \"attrib-grad\" is checked first; gradcam was already skipped above\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.8066725381561981, 0.8047227864289791, 0.7396506642087367, 0.7066635481488853, 0.6864190029998514, 0.6691745892204913, 0.6561696878835682, 0.6464029726455401, 0.626123754518905, 0.6084609247900431, 0.5928654872794762, 0.5789144183149267, 0.5664298127375809, 0.5551727223009556, 0.5450783610536211, 0.5356603275842275, 0.5259727142298827, 0.5172930177239313, 0.5085892366424797, 0.500650653353934, 0.4930969971974605, 0.48622464250097147, 0.4799484951121288, 0.47417550041859907, 0.4687945700538006, 0.46381323172430233, 0.4588519664269772, 0.4540915757551263, 0.44964261882587164, 0.4447047638602456, 0.43958939002864345, 0.4347717321385848, 0.430189490945402, 0.42576307511875866, 0.4215336212311145, 0.4174714665218399, 0.41352026396707997, 0.4097156283941621, 0.4060615087947915, 0.40256959279838467, 0.39915198934598145, 0.395882054780102, 0.39276188871406753, 0.389760374810877, 0.3868766153951632, 0.384086381252818, 0.3813597424902862, 0.3786996069958222, 0.3760349238248711, 0.37317470862616886, 0.37028709324987114, 0.36748371088432363, 0.3646747783924214, 0.3618675076438053, 0.35915331821803625, 0.3563984252664362, 0.3537171242477853, 0.3509292176365283, 0.34803620616842373, 0.3452331632618449]\n"
     ]
    }
   ],
   "source": [
    "# PCA baseline: score PCA components of the features the same way as H.\n",
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "# For each concept keep its best attribute correlation, rank ascending,\n",
    "# then report the mean over the top-N concepts for N = 1..60.\n",
    "best_per_concept = corr_v_pca.max(axis=1)\n",
    "best_match_idx = np.argsort(best_per_concept)\n",
    "result_list_pca = [best_per_concept[best_match_idx[-N:]].mean() for N in range(1, 61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.5533868004240448, 0.5150972016102875, 0.499527969844832, 0.4888417876406804, 0.47945750469907755, 0.46964639464292324, 0.4588089920743392, 0.4504997504564577, 0.4437716017308472, 0.43814921112448585, 0.43323413668750455, 0.4286727021610617, 0.4247185242011802, 0.42110887080651577, 0.41782178245762613, 0.41455134886771317, 0.4114966899967246, 0.40859200502756843, 0.4058925464531857, 0.4033734408578867, 0.4010837812459841, 0.3989944864471134, 0.39706313869677606, 0.3952879639413926, 0.39352949180859603, 0.39181166053343375, 0.3900563302420767, 0.38825979985475534, 0.3863046968932735, 0.38443966190037404, 0.38267364331049863, 0.38100337694902264, 0.37919472877666927, 0.37741223553376785, 0.3756547491400136, 0.37393372637129885, 0.37229813692542946, 0.37074549860703676, 0.36912087069255173, 0.3674844389765859, 0.3657777669692572, 0.36408642061691693, 0.3624506308729559, 0.3608667670644505, 0.35933540749135884, 0.35785159893004015, 0.3562396716922225, 0.35465692872844107, 0.35307894878305596, 0.3514921532730321, 0.3499056490469997, 0.3481954867470914, 0.3464521934323552, 0.34475537960094677, 0.3430634120275343, 0.3414179811631957, 0.339787666022658, 0.3379809264688821, 0.33600350857214123, 0.3338560774349403]\n"
     ]
    }
   ],
   "source": [
    "# ICA baseline (single run).\n",
    "result_list_ica = []\n",
    "# NOTE(review): np.random.randint is unseeded here, so the ICA seed is not\n",
    "# reproducible across notebook runs -- seed the generator to fix this.\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "# only the first of the 10 drawn seeds is used (range(1))\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    # rank concepts by best attribute correlation; average the top-N\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "# print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[0.6644328990068853, 0.6643178240305876, 0.6549950831812282, 0.6451691841603835, 0.6391943858135172, 0.6350740102529554, 0.6303126016828109, 0.6261589673954919, 0.6228181741673029, 0.6200482251121435, 0.6163706075023023, 0.6126957363072063, 0.607643586749182, 0.6032253761950549, 0.5990974243366174, 0.595353290406957, 0.5918799334186854, 0.5887032456312297, 0.5851888979487344, 0.5813891374421339, 0.577915248198081, 0.574638104957721, 0.5716402157888354, 0.5688872823010537, 0.5659046997532382, 0.5631507648403705, 0.5605827184730727, 0.5579598035128518, 0.5554391156439019, 0.552950322869737, 0.5506166864998713, 0.5484017098399567, 0.5461680613899944, 0.5440506119248341, 0.5419133490517646, 0.5398240670283345, 0.5378230829658988, 0.5359136798940201, 0.5340537434476916, 0.5320958342971415, 0.5302180400362091, 0.5281586152235729, 0.5261609227710544, 0.5241523851053436, 0.5222209279213407, 0.5202125713666323, 0.5181003627037087, 0.5159830599756161, 0.5139306421315029, 0.511897224692618, 0.5099227629068869, 0.5080151102686975, 0.5058938328545427, 0.5036977603368185, 0.5014165403729784, 0.4992000550869543, 0.4970539686481457, 0.49477528211562455, 0.49244305640185876, 0.48958186885330607]\n",
      "[0.6644329  0.66431782 0.65499508 0.64516918 0.63919439 0.63507401\n",
      " 0.6303126  0.62615897 0.62281817 0.62004823 0.61637061 0.61269574\n",
      " 0.60764359 0.60322538 0.59909742 0.59535329 0.59187993 0.58870325\n",
      " 0.5851889  0.58138914 0.57791525 0.5746381  0.57164022 0.56888728\n",
      " 0.5659047  0.56315076 0.56058272 0.5579598  0.55543912 0.55295032\n",
      " 0.55061669 0.54840171 0.54616806 0.54405061 0.54191335 0.53982407\n",
      " 0.53782308 0.53591368 0.53405374 0.53209583 0.53021804 0.52815862\n",
      " 0.52616092 0.52415239 0.52222093 0.52021257 0.51810036 0.51598306\n",
      " 0.51393064 0.51189722 0.50992276 0.50801511 0.50589383 0.50369776\n",
      " 0.50141654 0.49920006 0.49705397 0.49477528 0.49244306 0.48958187] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
      " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
     ]
    }
   ],
   "source": [
    "## compute a random baseline: random directions scored the same way as H\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    # Draw a fresh uniform tensor with H's shape. empty_like keeps the loaded\n",
    "    # H untouched -- H.uniform_(-1, 1) would have clobbered it in place,\n",
    "    # breaking any re-run of the earlier cells that use H.\n",
    "    H_rand = torch.empty_like(H).uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    # rank concepts by best attribute correlation; average the top-N\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### summary for five seeds"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "np_grad = np.asarray(grad)\n",
    "print(\"grad: \", np.mean(np_grad,axis=0), np.std(np_grad, axis=0))\n",
    "print('\\n')\n",
    "np_ig = np.asarray(ig)\n",
    "print(\"ig: \", np.mean(np_ig,axis=0), np.std(np_ig, axis=0))\n",
    "print('\\n')\n",
    "np_sg = np.asarray(sg)\n",
    "print(\"sg: \", np.mean(np_sg,axis=0), np.std(np_sg, axis=0))\n",
    "print('\\n')\n",
    "np_rand = np.asarray(rand)\n",
    "print(\"rand: \", np.mean(np_rand,axis=0), np.std(np_rand, axis=0))\n",
    "print('\\n')\n",
    "np_pca = np.asarray(pca)\n",
    "print(\"pca: \", np.mean(np_pca,axis=0), np.std(np_pca, axis=0))\n",
    "print('\\n')\n",
    "np_ica = np.asarray(ica)\n",
    "print(\"ica: \", np.mean(np_ica,axis=0), np.std(np_ica, axis=0))\n",
    "print('\\n')\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "rand:  0.6590823676134191 0.01742346780003858\n",
      "rand:  0.6180132049644389 0.011710850964013614\n",
      "rand:  0.5845929283551683 0.011260759382785077\n",
      "rand:  0.5585156993254881 0.011251051264794898\n",
      "rand:  0.53648026646565 0.010473623572556781\n",
      "rand:  0.5156414604085621 0.009342705275441385\n",
      "rand:  0.4915072239803505 0.009849649180007912\n"
     ]
    }
   ],
   "source": [
    "# rand summary at selected top-N cutoffs\n",
    "np_rand = np.asarray(rand)  # convert once; loop-invariant\n",
    "for k in [0, 9, 19, 29, 39, 49, 59]:\n",
    "    print(\"rand: \", np.mean(np_rand,axis=0)[k], np.std(np_rand, axis=0)[k])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "grad:  0.7005832629740372 0.04517424599661237\n",
      "\n",
      "\n",
      "ig:  0.7102277361326308 0.019617605364213843\n",
      "\n",
      "\n",
      "sg:  0.685783390900762 0.019921777517928257\n",
      "\n",
      "\n",
      "rand:  0.6590823676134191 0.01742346780003858\n",
      "\n",
      "\n",
      "pca:  0.7890936354057254 0.023855923474116608\n",
      "\n",
      "\n",
      "ica:  0.5149866260804584 0.027956864417384538\n",
      "\n",
      "\n",
      "9\n",
      "grad:  0.6263279569187958 0.029304701703303005\n",
      "\n",
      "\n",
      "ig:  0.6572782619884471 0.008490754064616324\n",
      "\n",
      "\n",
      "sg:  0.6413838943036128 0.030990209571949482\n",
      "\n",
      "\n",
      "rand:  0.6180132049644389 0.011710850964013614\n",
      "\n",
      "\n",
      "pca:  0.6016477778248477 0.007399877103968577\n",
      "\n",
      "\n",
      "ica:  0.4425245580762292 0.005198099404262519\n",
      "\n",
      "\n",
      "19\n",
      "grad:  0.5849322230870875 0.02776648103687335\n",
      "\n",
      "\n",
      "ig:  0.6150724110804617 0.013250310823966485\n",
      "\n",
      "\n",
      "sg:  0.6103142844656915 0.03159698158489673\n",
      "\n",
      "\n",
      "rand:  0.5845929283551683 0.011260759382785077\n",
      "\n",
      "\n",
      "pca:  0.4965732026212778 0.004934500038918008\n",
      "\n",
      "\n",
      "ica:  0.4119819827158916 0.006247791068487886\n",
      "\n",
      "\n",
      "29\n",
      "grad:  0.5577997589195313 0.027819489437635986\n",
      "\n",
      "\n",
      "ig:  0.5865331839032494 0.016368677660049704\n",
      "\n",
      "\n",
      "sg:  0.5839031520712789 0.03209061975484123\n",
      "\n",
      "\n",
      "rand:  0.5585156993254881 0.011251051264794898\n",
      "\n",
      "\n",
      "pca:  0.44007683554115634 0.005637730950180338\n",
      "\n",
      "\n",
      "ica:  0.38974308158603993 0.0070326045941607965\n",
      "\n",
      "\n",
      "39\n",
      "grad:  0.535435973293934 0.028842766856395325\n",
      "\n",
      "\n",
      "ig:  0.5617118682615119 0.01848287029122656\n",
      "\n",
      "\n",
      "sg:  0.5611957578944177 0.03263868671883359\n",
      "\n",
      "\n",
      "rand:  0.53648026646565 0.010473623572556781\n",
      "\n",
      "\n",
      "pca:  0.40158159838601526 0.0041212321141353555\n",
      "\n",
      "\n",
      "ica:  0.37022036007547715 0.007222364931908445\n",
      "\n",
      "\n",
      "49\n",
      "grad:  0.5145330194340809 0.030489028514753727\n",
      "\n",
      "\n",
      "ig:  0.5386578092332863 0.02054077334954571\n",
      "\n",
      "\n",
      "sg:  0.5398365140358906 0.03313414736685914\n",
      "\n",
      "\n",
      "rand:  0.5156414604085621 0.009342705275441385\n",
      "\n",
      "\n",
      "pca:  0.3716198433535644 0.003886581132494626\n",
      "\n",
      "\n",
      "ica:  0.3529135872862036 0.0071035870882347465\n",
      "\n",
      "\n",
      "59\n",
      "grad:  0.4896536740422093 0.02992784058098751\n",
      "\n",
      "\n",
      "ig:  0.5138556808664159 0.020827485110084913\n",
      "\n",
      "\n",
      "sg:  0.5162956593866413 0.033490345486787546\n",
      "\n",
      "\n",
      "rand:  0.4915072239803505 0.009849649180007912\n",
      "\n",
      "\n",
      "pca:  0.34588312651698966 0.004308997839088041\n",
      "\n",
      "\n",
      "ica:  0.3351258794566309 0.006332796089284971\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Summary of every method at selected top-N cutoffs.\n",
    "# Convert all result lists once -- they are loop-invariant.\n",
    "np_grad = np.asarray(grad)\n",
    "np_ig = np.asarray(ig)\n",
    "np_sg = np.asarray(sg)\n",
    "np_rand = np.asarray(rand)\n",
    "np_pca = np.asarray(pca)\n",
    "np_ica = np.asarray(ica)\n",
    "for k in [0, 9, 19, 29, 39, 49, 59]:\n",
    "    print(k)\n",
    "    print(\"grad: \", np.mean(np_grad,axis=0)[k], np.std(np_grad, axis=0)[k])\n",
    "    print('\\n')\n",
    "    print(\"ig: \", np.mean(np_ig,axis=0)[k], np.std(np_ig, axis=0)[k])\n",
    "    print('\\n')\n",
    "    print(\"sg: \", np.mean(np_sg,axis=0)[k], np.std(np_sg, axis=0)[k])\n",
    "    print('\\n')\n",
    "    print(\"rand: \", np.mean(np_rand,axis=0)[k], np.std(np_rand, axis=0)[k])\n",
    "    print('\\n')\n",
    "    print(\"pca: \", np.mean(np_pca,axis=0)[k], np.std(np_pca, axis=0)[k])\n",
    "    print('\\n')\n",
    "    print(\"ica: \", np.mean(np_ica,axis=0)[k], np.std(np_ica, axis=0)[k])\n",
    "    print('\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "### get the best out of ours \n",
    "# Axis-1 order follows the stacked list: [grad, ig, sg], and each result\n",
    "# list has 60 top-N entries (range(1,61)) -> shape (seed, 3, 60).\n",
    "all_ours = np.stack([np_grad, np_ig, np_sg], axis=1)  # dim=(seed, [grad,ig,sg], 60)\n",
    "best_ours = []\n",
    "for i in range(5):\n",
    "    # per seed, keep the attribution method with the highest top-1 score\n",
    "    best_ours.append(all_ours[i,np.argmax(all_ours[i,:,0]),:])\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "best ours:  0.7315972586973313 0.022192259152308845\n",
      "9\n",
      "best ours:  0.6589452996748373 0.019822592612923386\n",
      "19\n",
      "best ours:  0.6202645723041824 0.02294114583604084\n",
      "29\n",
      "best ours:  0.5948724047127 0.02616776970822917\n",
      "39\n",
      "best ours:  0.5731840073957619 0.027944563138525606\n",
      "49\n",
      "best ours:  0.5531503203194388 0.028986026865783065\n",
      "59\n",
      "best ours:  0.5291404026222003 0.028260681912062182\n"
     ]
    }
   ],
   "source": [
    "# best-of-ours summary at selected top-N cutoffs\n",
    "np_best_ours = np.asarray(best_ours)  # convert once; loop-invariant\n",
    "for k in [0, 9, 19, 29, 39, 49, 59]:\n",
    "    print(k)\n",
    "    print(\"best ours: \", np.mean(np_best_ours,axis=0)[k], np.std(np_best_ours, axis=0)[k])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "our classifier:  77.46600000000001 0.374945329348158\n"
     ]
    }
   ],
   "source": [
    "### classifier accuracy: mean and std over the five seeds\n",
    "acc = np.array([77.53, 77.12, 78.07, 77.02, 77.59])\n",
    "print(\"our classifier: \", acc.mean(), acc.std())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "disentangle",
   "language": "python",
   "name": "disentangle"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
