{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import matplotlib.pyplot as plt\n",
    "import glob\n",
    "import os\n",
    "from tqdm import tqdm\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "from cub_models import Model_Wrapper\n",
    "\n",
    "from cub_datasets import get_trainval_datasets\n",
    "from cub_models.utils import CenterLoss, AverageMeter, TopKAccuracyMetric, ModelCheckpoint, batch_augment, ConsistencyEvaluation, KMeans\n",
    "\n",
    "import sys \n",
    "sys.path.append('..')\n",
    "import common.cub_attributions as attribution_method\n",
    "\n",
    "import numpy as np\n",
    "from sklearn.manifold import TSNE\n",
    "from matplotlib import pyplot as plt\n",
    "\n",
    "from torchvision import transforms\n",
    "ToPILImage = transforms.ToPILImage()\n",
    "from PIL import Image\n",
    "\n",
    "\n",
    "import cub_models.config_distributed as config\n",
    "# torch.set_printoptions(profile=\"full\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "### for reproducibility: seed both the torch and numpy RNGs\n",
    "seed = 199\n",
    "torch.manual_seed(seed)\n",
    "np.random.seed(seed)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### read class-wise ground-truth attributes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Read the class-level ground-truth attribute data shipped with CUB-200-2011.\n",
    "DATAPATH ='./CUB_200_2011' ## path to cub data\n",
    "# Per-class continuous attribute strengths, one whitespace-separated row per class.\n",
    "# NOTE(review): the join below prefixes DATAPATH ('./CUB_200_2011') with another\n",
    "# 'CUB_200_2011/...' segment, giving './CUB_200_2011/CUB_200_2011/attributes/...'.\n",
    "# This matches a layout where the archive was extracted into a same-named folder —\n",
    "# confirm against the local directory structure.\n",
    "attribute_cls = []\n",
    "with open(os.path.join(DATAPATH, 'CUB_200_2011/attributes/class_attribute_labels_continuous.txt')) as f:\n",
    "    for line in f.readlines():\n",
    "        attr = line.strip().split(' ')\n",
    "        attr_float= torch.tensor([float(att) for att in attr])\n",
    "        attribute_cls.append(attr_float)\n",
    "attribute_cls = torch.stack(attribute_cls)  # (n_classes, n_attributes)\n",
    "#attribute_cls = F.normalize(attribute_cls, dim=1, p=2)\n",
    "\n",
    "# Map attribute id (string) -> attribute name, read from attributes.txt.\n",
    "attribute_names = {}\n",
    "with open(os.path.join(DATAPATH, 'attributes.txt')) as f:\n",
    "    for line in f.readlines():\n",
    "        attr = line.strip().split(' ')\n",
    "        attribute_names[attr[0]] = attr[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Accumulators for per-method result curves, appended to by the cells below.\n",
    "ig = []    # runs whose log dir contains 'attrib-ig'\n",
    "sg = []    # runs whose log dir contains 'attrib-sg'\n",
    "grad = []  # runs whose log dir contains 'attrib-grad'\n",
    "rand = []  # random-direction baseline\n",
    "pca = []   # PCA baseline\n",
    "ica = []   # ICA baseline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load model and dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GPU settings\n",
    "# CUDA_VISIBLE_DEVICES must be exported before the CUDA runtime is initialised;\n",
    "# torch.cuda.is_available() may initialise it, so set the env var first.\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = config.GPU\n",
    "assert torch.cuda.is_available()\n",
    "device = torch.device(\"cuda:0\")\n",
    "torch.backends.cudnn.benchmark = True  # fixed 224x224 inputs -> benchmark mode is safe\n",
    "\n",
    "train_dataset, validate_dataset = get_trainval_datasets(config.tag, (224,224))\n",
    "\n",
    "# Deterministic order (shuffle=False) so image indices map stably to features.\n",
    "validate_loader = DataLoader(validate_dataset, batch_size=32, shuffle=False,\n",
    "                                            num_workers=config.workers, pin_memory=True)\n",
    "\n",
    "num_classes = validate_dataset.num_classes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def concept_score(H, vector_dict):\n",
    "    \"\"\"Project every feature vector onto the L2-normalised concept directions.\n",
    "\n",
    "    H: matrix of concept directions (normalised along dim 1 before use).\n",
    "    vector_dict: mapping image id -> feature vector.\n",
    "    Returns (id_list, score_list) in the dict's iteration order, where each\n",
    "    score entry is the per-direction dot product for one image.\n",
    "    \"\"\"\n",
    "    directions = F.normalize(H, p=2.0, dim=1)\n",
    "    id_list = list(vector_dict.keys())\n",
    "    score_list = [torch.matmul(directions, vec) for vec in vector_dict.values()]\n",
    "    return id_list, score_list\n",
    "\n",
    "def concept_score_reduction(vector_dict, reduction='pca', seed=None):\n",
    "    \"\"\"Reduce the feature vectors to 30 components with PCA or ICA.\n",
    "\n",
    "    vector_dict: mapping image id -> feature vector (torch tensors).\n",
    "    reduction: 'pca' or 'ica'.\n",
    "    seed: random_state for FastICA (PCA ignores it here).\n",
    "    Returns (id_list, score_list): one reduced 30-dim tensor per id, in the\n",
    "    dict's iteration order.\n",
    "    Raises ValueError for an unknown reduction name.\n",
    "    \"\"\"\n",
    "    if reduction == 'pca':\n",
    "        from sklearn.decomposition import PCA\n",
    "        reduction = PCA(n_components=30)\n",
    "    elif reduction == 'ica':\n",
    "        from sklearn.decomposition import FastICA\n",
    "        reduction = FastICA(n_components=30, random_state=seed)\n",
    "    else:\n",
    "        # Fail loudly: the old code printed 'Not implemented' and then crashed\n",
    "        # later when fit_transform was called on the string.\n",
    "        raise ValueError(f\"Unknown reduction '{reduction}', expected 'pca' or 'ica'\")\n",
    "\n",
    "    ### vector reduction using the chosen decomposition\n",
    "    id_list = list(vector_dict.keys())\n",
    "    vector_matrix = torch.stack(list(vector_dict.values())).numpy()\n",
    "    vector_matrix = torch.tensor(reduction.fit_transform(vector_matrix))\n",
    "    score_list = list(vector_matrix)\n",
    "\n",
    "    return id_list, score_list\n",
    "\n",
    "\n",
    "def concept_score_cls(score_list, id_list, validate_dataset=validate_dataset):\n",
    "    ### Average the 30 per-image concept scores within each of the 200 classes.\n",
    "    # The default dataset is bound at definition time from the notebook-global\n",
    "    # validate_dataset; validate_dataset[id][1] is the image's class label.\n",
    "    score_cls = torch.zeros((200,30))\n",
    "    num_sample_cls = torch.zeros((200,1))\n",
    "    for s_list, id in zip (score_list, id_list):\n",
    "        score_cls[validate_dataset[id][1], :] += s_list\n",
    "        num_sample_cls[validate_dataset[id][1],:] += 1\n",
    "    # NOTE(review): a class with no samples divides by zero and yields a NaN row.\n",
    "    score_cls /= num_sample_cls\n",
    "    return score_cls\n",
    "\n",
    "def concept_score_num_cls(score_list, id_list, validate_dataset=validate_dataset, num_classes=200, num_concepts=30):\n",
    "    \"\"\"Class-averaged, direction-split concept scores.\n",
    "\n",
    "    Each per-image score vector is split into its positive part and its\n",
    "    (absolute) negative part, concatenated to 2*num_concepts values, and\n",
    "    averaged over the images of each class. Defaults keep the original\n",
    "    hard-coded (200, 60) behaviour.\n",
    "    \"\"\"\n",
    "    score_cls = torch.zeros((num_classes, 2 * num_concepts))\n",
    "    num_sample_cls = torch.zeros((num_classes, 1))\n",
    "    for s_list, id in zip(score_list, id_list):\n",
    "        label = validate_dataset[id][1]\n",
    "        # Positive and negative responses are tracked as separate concepts.\n",
    "        s_list_bidir = torch.cat([torch.relu(s_list), torch.relu(-s_list)])\n",
    "        score_cls[label, :] += s_list_bidir\n",
    "        num_sample_cls[label, :] += 1\n",
    "    score_cls /= num_sample_cls\n",
    "    return score_cls\n",
    "\n",
    "def topK_corr(score_cls, attribute_cls, topN=5, n_C=60):\n",
    "    \"\"\"Top-N absolute Pearson correlations between concepts and attributes.\n",
    "\n",
    "    score_cls: (n_classes, n_C) class-level concept scores.\n",
    "    attribute_cls: (n_classes, n_attr) class-level attribute strengths.\n",
    "    Returns (corr_v, corr_ind), both (n_C, topN): per concept, the topN\n",
    "    |correlation| values in descending order and the attribute indices.\n",
    "    \"\"\"\n",
    "    corr = np.corrcoef(torch.cat((score_cls, attribute_cls), dim=1).t())\n",
    "    corr_c2attr = abs(corr[:n_C, n_C:])\n",
    "    # Bug fix: a hard-coded 'topN = 5' used to shadow the topN parameter here.\n",
    "    corr_v = np.zeros((n_C, topN))\n",
    "    corr_ind = np.zeros((n_C, topN))\n",
    "    ind_sort = np.argsort(corr_c2attr, axis=1)[:, ::-1]  # descending |corr|\n",
    "\n",
    "    for c in range(n_C):\n",
    "        corr_v[c, :] = corr_c2attr[c, ind_sort[c, :topN]]\n",
    "        corr_ind[c, :] = ind_sort[c, :topN]\n",
    "    return corr_v, corr_ind\n",
    "\n",
    "def compute_corr(ranks):\n",
    "    \"\"\"Compute the Spearman rank correlation of a rank matrix.\n",
    "\n",
    "    ranks: (M, R) matrix — M concepts, each row holding ranks in [1...R].\n",
    "    Returns an (M, M) matrix of pairwise rank correlations.\n",
    "    \"\"\"\n",
    "    num_input = ranks.shape[0]\n",
    "    resmat = np.zeros((num_input, num_input))\n",
    "    for i, row_i in enumerate(ranks):\n",
    "        for j, row_j in enumerate(ranks):\n",
    "            # Pearson correlation of the two rank rows:\n",
    "            # (E[XY] - E[X]E[Y]) / (std_X * std_Y)\n",
    "            cov = np.mean(row_i * row_j, axis=0) - np.mean(row_i, axis=0) * np.mean(row_j, axis=0)\n",
    "            corr = cov / (np.std(row_i, axis=0) * np.std(row_j, axis=0))\n",
    "            resmat[i, j] = np.mean(corr)\n",
    "    return resmat\n",
    "\n",
    "def rank_corr(allres):\n",
    "    \"\"\"Return the pairwise rank correlation of concepts.\n",
    "\n",
    "    allres: (n_concepts, n_samples) raw scores per concept.\n",
    "    \"\"\"\n",
    "    # Double argsort turns each row of raw values into 0-based ranks.\n",
    "    ranks = allres.argsort(axis=1).argsort(axis=1)\n",
    "    return compute_corr(ranks)\n",
    "\n",
    "def topK_rank_corr(score_cls, attribute_cls, topN=5, n_C=60):\n",
    "    \"\"\"Top-N absolute Spearman rank correlations between concepts and attributes.\n",
    "\n",
    "    Same contract as topK_corr, but using Spearman instead of Pearson.\n",
    "    Returns (corr_v, corr_ind), both of shape (n_C, topN).\n",
    "    \"\"\"\n",
    "    from scipy import stats\n",
    "    # spearmanr treats columns as variables, so no transpose is needed here.\n",
    "    corr, pval = stats.spearmanr(torch.cat((score_cls, attribute_cls), dim=1))\n",
    "    corr_c2attr = abs(corr[:n_C, n_C:])\n",
    "    # Bug fix: a hard-coded 'topN = 5' used to shadow the topN parameter here.\n",
    "    corr_v = np.zeros((n_C, topN))\n",
    "    corr_ind = np.zeros((n_C, topN))\n",
    "    ind_sort = np.argsort(corr_c2attr, axis=1)[:, ::-1]  # descending |corr|\n",
    "\n",
    "    for c in range(n_C):\n",
    "        corr_v[c, :] = corr_c2attr[c, ind_sort[c, :topN]]\n",
    "        corr_ind[c, :] = ind_sort[c, :topN]\n",
    "    return corr_v, corr_ind"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## OA analysis"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To train a ResNet-50 on CUB, run the script \"train_distributed.sh\" in the folder \"../cub_training\", then set \"model_dir\" below to the directory containing the saved model.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "start_epoch = 0\n",
    "net = Model_Wrapper(num_classes=num_classes, M=config.num_attentions, net=config.net, pretrained=False, pth_path=config.pretrain_path)\n",
    "\n",
    "### trained model dir\n",
    "model_dir = './checkpoints/resnet50_224' ## path to trained resnet 50 for cub data\n",
    "\n",
    "# Load the checkpoint; map_location avoids failures when it was saved on a\n",
    "# different device layout than the current one.\n",
    "checkpoint = torch.load(os.path.join(model_dir, 'model_bestacc.pth'), map_location=device)\n",
    "state_dict = checkpoint['state_dict']\n",
    "net.load_state_dict(state_dict)\n",
    "\n",
    "net.to(device)\n",
    "net.eval()\n",
    "\n",
    "# Extract one feature vector per validation image, keyed by image index.\n",
    "# no_grad avoids building autograd graphs during pure inference.\n",
    "vector_dict = {}\n",
    "with torch.no_grad():\n",
    "    for (x, _, img_index, _) in tqdm(validate_loader):\n",
    "        x = x.to(device)\n",
    "        _, _, out = net(x)\n",
    "        out = out.detach().cpu()\n",
    "        for k, v in zip(img_index, out):\n",
    "            vector_dict[k.item()] = v"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Study the found concept directions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Collect the evaluation logs for every saved-concepts run.\n",
    "log_dir = './post_oa/checkpoint-checkpoints/'  ### path to saved concepts\n",
    "logs = []\n",
    "log_names = []\n",
    "name_pattern = 'resnet50_224/*'\n",
    "file_list = glob.glob(os.path.join(log_dir, name_pattern))\n",
    "results = {}\n",
    "for r in file_list:\n",
    "    # Build '<resolution>_<run-name>' from the last two path components.\n",
    "    # NOTE(review): '/'-splitting assumes POSIX-style paths.\n",
    "    resolution = str(r.split('/')[-2].split('_')[-1])\n",
    "    name = r.split('/')[-1]\n",
    "    name = resolution + \"_\" + name\n",
    "    print(name)\n",
    "    i = 0\n",
    "    # Each run directory may contain several numbered eval_results<i>.json files.\n",
    "    while os.path.isfile(r +f\"/eval_results{i}.json\"):\n",
    "        log_names.append(name+\"_r\"+str(i))\n",
    "        logs.append((name+\"_r\" +str(i), json.load(open(r +f\"/eval_results{i}.json\"))))\n",
    "        i+=1\n",
    "print(f\"Found and loaded {len(logs)} log files.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "- \"concept_score()\" is the function for score calculation.\n",
    "\n",
    "- \"id\" is used to find out which class the image belongs to."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Score the saved concept directions of selected runs against the attributes.\n",
    "# NOTE(review): only file_list indices 1..3 are analysed — presumably a manual\n",
    "# run selection; TODO confirm, or use range(len(file_list)) to cover all runs.\n",
    "for H_index in range(1,4):\n",
    "    print(file_list[H_index])\n",
    "    if \"attrib-gradcam\" in file_list[H_index]:\n",
    "        continue\n",
    "    # H holds the saved concept directions for this run.\n",
    "    H = torch.load(os.path.join(file_list[H_index], 'intermediate_dirs.pt'))\n",
    "    id_list_224, score_list_224 = concept_score(H, vector_dict)\n",
    "    score_cls_all = concept_score_num_cls(score_list_224, id_list_224)\n",
    "    corr_v, corr_ind = topK_rank_corr(score_cls_all, attribute_cls, topN=5, n_C=60)\n",
    "    # Rank concepts by their best attribute match; column 0 is the top match.\n",
    "    best_match_idx = np.argsort(corr_v.max(axis=1))\n",
    "    result_list = [corr_v[best_match_idx[(-1)*N:],0].mean() for N in range(1,61)]\n",
    "    # Route the curve to the accumulator matching the attribution method.\n",
    "    if \"attrib-grad\" in file_list[H_index]:\n",
    "        grad.append(result_list)\n",
    "    elif \"attrib-sg\" in file_list[H_index]:\n",
    "        sg.append(result_list)\n",
    "    elif \"attrib-ig\" in file_list[H_index]:\n",
    "        ig.append(result_list)\n",
    "    print(result_list[0], result_list[4], result_list[9])\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Results from other baselines"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### PCA baseline: use the top-30 principal components as concept directions.\n",
    "pca_id_list, pca_score_list = concept_score_reduction(vector_dict, reduction='pca')\n",
    "\n",
    "score_cls_all_pca = concept_score_num_cls(pca_score_list, pca_id_list)\n",
    "corr_v_pca, corr_ind_pca = topK_rank_corr(score_cls_all_pca, attribute_cls, topN=5, n_C=60)\n",
    "\n",
    "# Mean best-match correlation over the top-N concepts, for N = 1..60.\n",
    "best_match_idx = np.argsort(corr_v_pca.max(axis=1))\n",
    "result_list_pca = [corr_v_pca.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "print(result_list_pca)\n",
    "pca.append(result_list_pca)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "### ICA baseline, analogous to the PCA cell above.\n",
    "result_list_ica = []\n",
    "random_seed_list = np.random.randint(99,size=10)\n",
    "# NOTE(review): 10 seeds are drawn but only range(1) (a single restart) is run,\n",
    "# and the mean/std aggregation below is commented out — TODO confirm whether\n",
    "# multiple ICA restarts were intended.\n",
    "for t in range(1):\n",
    "    ica_id_list, ica_score_list = concept_score_reduction(vector_dict, reduction='ica', seed=random_seed_list[t])\n",
    "\n",
    "    score_cls_all_ica = concept_score_num_cls(ica_score_list, ica_id_list)\n",
    "    corr_v_ica, corr_ind_ica = topK_rank_corr(score_cls_all_ica, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_v_ica.max(axis=1))\n",
    "    result_ica = [corr_v_ica.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_ica)\n",
    "    result_list_ica.append(result_ica)\n",
    "# print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "ica.append(result_ica)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "## Compute a random-direction baseline.\n",
    "result_list_rand = []\n",
    "for t in range(1):\n",
    "    print(t)\n",
    "    result_list = []\n",
    "    # Draw fresh uniform directions instead of overwriting H in place:\n",
    "    # the old 'H_rand = H.uniform_(-1, 1)' mutated the concept directions\n",
    "    # loaded in the earlier cell, silently corrupting them for any re-run.\n",
    "    # empty_like + uniform_ draws the identical RNG sequence as before.\n",
    "    H_rand = torch.empty_like(H).uniform_(-1, 1)\n",
    "\n",
    "    id_list_rand, score_list_rand = concept_score(H_rand, vector_dict)\n",
    "    score_cls_rand_all = concept_score_num_cls(score_list_rand, id_list_rand)\n",
    "    corr_rand_v, corr_rand_ind = topK_rank_corr(score_cls_rand_all, attribute_cls, topN=5, n_C=60)\n",
    "    best_match_idx = np.argsort(corr_rand_v.max(axis=1))\n",
    "    result_list = [corr_rand_v.max(axis=1)[best_match_idx[(-1)*N:]].mean() for N in range(1,61)]\n",
    "    print(result_list)\n",
    "    result_list_rand.append(result_list)\n",
    "np_result = np.asarray(result_list_rand)\n",
    "print(np.mean(np_result,axis=0), np.std(np_result, axis=0))\n",
    "rand.append(np.mean(np_result,axis=0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch2",
   "language": "python",
   "name": "pytorch2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
