{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "def find_best(checkpoint, task):\n",
    "    log_files_a = os.listdir(checkpoint+\"/\")\n",
    "    log_files_b = []\n",
    "\n",
    "    for file in log_files_a:\n",
    "        file_split = file.split(\".\")\n",
    "        if(file_split[-1]==\"txt\"):\n",
    "            file_split_2 = file_split[0].split(\"_\")\n",
    "            if(file_split_2[0]==\"session\" and file_split_2[1]==str(task) ):\n",
    "                f = np.loadtxt(checkpoint+\"/\"+file, skiprows=1)\n",
    "                best_acc = max(f[-1,-1], f[-1,-2])\n",
    "                if(file_split_2[-1]!=\"RT\"):\n",
    "                    return best_acc\n",
    "                    \n",
    "    return best_acc\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "##### import numpy as np\n",
    "from collections import Counter\n",
    "\n",
    "def softmax(x):\n",
    "    \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n",
    "    e_x = np.exp(x)**(1/2) \n",
    "    return e_x / np.sum(e_x, axis=1, keepdims=True)\n",
    "\n",
    "def get_correct(acc_task, task, chunks, start_point, class_per_task):\n",
    "        \"\"\"Count class- and task-level correct predictions for one chunk.\n",
    "\n",
    "        Reads acc_task[start_point + i][t] as a 4-tuple (per-class\n",
    "        predictions, class scores, task scores, target) -- items 0, 1 and 3\n",
    "        are torch tensors, item 2 is array-like.  Returns (correct,\n",
    "        correct2): the number of class-correct and task-correct samples\n",
    "        among the `chunks` samples starting at `start_point`.\n",
    "\n",
    "        NOTE(review): assumes every sample in the chunk belongs to the same\n",
    "        true task (only targets[0][0] is inspected) -- confirm with caller.\n",
    "        \"\"\"\n",
    "        correct = 0\n",
    "        correct2 = 0\n",
    "        task_scores = []\n",
    "        class_scores = []\n",
    "        targets = []\n",
    "        targets_pred = []\n",
    "        # Gather per-task predictions/scores/targets for each sample in the chunk.\n",
    "        for t in range(task+1):\n",
    "            list_0 = []\n",
    "            list_1 = []\n",
    "            list_2 = []\n",
    "            list_3 = []\n",
    "            for i in range(chunks):\n",
    "                acc_task_0 = acc_task[start_point + i]\n",
    "                list_0.append(acc_task_0[t][0].detach().cpu().numpy())\n",
    "                list_1.append(acc_task_0[t][1].detach().cpu().numpy())\n",
    "                list_2.append(acc_task_0[t][2])\n",
    "                list_3.append(acc_task_0[t][3].detach().cpu().numpy())\n",
    "            list_0 = np.array(list_0)\n",
    "            list_1 = np.array(list_1)\n",
    "            list_2 = np.array(list_2)\n",
    "            list_3 = np.array(list_3)\n",
    "            \n",
    "            targets_pred.append(list_0)\n",
    "            class_scores.append(list_1)\n",
    "            task_scores.append(list_2)\n",
    "            targets.append(list_3)\n",
    "            \n",
    "        # Task inference: average each candidate task's score column over the\n",
    "        # chunk, then pick the argmax (np.argmax returns a numpy integer).\n",
    "        m = task_scores[0]\n",
    "        task_scores2 = []\n",
    "        for t2 in range(task+1):\n",
    "            m2 = m[:,t2:(t2+1)]\n",
    "            m3 = np.max(m2, 1)\n",
    "            task_scores2.append(np.mean(m3))\n",
    "        pred_task = np.argmax(task_scores2)  \n",
    "        # Award class-level credit only when the inferred task matches the\n",
    "        # chunk's true task (derived from the first sample's target).\n",
    "        if(pred_task == targets[0][0]//class_per_task):\n",
    "            correct2 += chunks\n",
    "            for j in range(chunks):\n",
    "                local_t = np.argmax(class_scores[pred_task][j])\n",
    "                # pred_task is a numpy scalar, so `list + numpy-int` below\n",
    "                # broadcasts to an array of shifted (global) class ids.\n",
    "                pred_x = [targets_pred[pred_task][j][local_t]]\n",
    "                target_x = targets[0][j]\n",
    "                if(target_x in pred_x+pred_task*class_per_task):\n",
    "                    correct += 1 \n",
    "#         else:\n",
    "#             print(pred_task, targets[0][0]//class_per_task)\n",
    "        return correct, correct2\n",
    "    \n",
    "def get_mata_score(p, task, chunks):\n",
    "    \"\"\"Class- and task-level accuracy (%) over all tasks seen up to `task`.\n",
    "\n",
    "    Loads p/meta_task_test_list_<task>.pickle and scores it chunk by chunk\n",
    "    via get_correct.  Returns (class_accuracy, task_accuracy) in percent.\n",
    "    \"\"\"\n",
    "    import pickle  # local import: this cell did not import pickle itself\n",
    "    task_samples = {0: 1000, 1: 1000, 2: 1000, 3: 1000, 4: 1000, 5: 1000, 6: 1000, 7: 1000, 8: 1000, 9: 1000}\n",
    "    total_samples = np.sum([task_samples[x] for x in range(task+1)])\n",
    "    class_per_task = 10\n",
    "    with open(p + \"/meta_task_test_list_\"+str(task)+\".pickle\", 'rb') as handle:\n",
    "        acc_task = pickle.load(handle)  # trusted local file; pickle is unsafe on untrusted data\n",
    "    correct = 0\n",
    "    correct2 = 0\n",
    "    for tt in range(task+1):\n",
    "        ctask_samples = np.sum([task_samples[x] for x in range(tt)])\n",
    "        # Full-size chunks for task tt.\n",
    "        for class_id in range(task_samples[tt]//chunks):\n",
    "            start_point = ctask_samples + class_id*chunks\n",
    "            c, c2 = get_correct(acc_task, task, chunks, start_point, class_per_task)\n",
    "            correct += c\n",
    "            correct2 += c2\n",
    "\n",
    "        # Remainder that does not fill a whole chunk.  Fixes vs. original:\n",
    "        # undefined name `t` (was passed instead of `task`), unbound class_id\n",
    "        # when chunks > task_samples[tt], and a missing ctask_samples offset\n",
    "        # in the leftover start_point.\n",
    "        new_chunk = task_samples[tt] % chunks\n",
    "        if(new_chunk > 0):\n",
    "            start_point = ctask_samples + (task_samples[tt]//chunks)*chunks\n",
    "            c, c2 = get_correct(acc_task, task, new_chunk, start_point, class_per_task)\n",
    "            correct += c\n",
    "            correct2 += c2\n",
    "    return correct/total_samples*100, correct2/total_samples*100\n",
    "\n",
    "# print(get_mata_score(\"models/cifar100/meta2_cifar_T10_63\", 6, 20) )           "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Each entry: [checkpoint/results path, plot colour, legend label, line style].\n",
    "models_dic = [\n",
    "    [\"results/cifar100/meta2_cifar_T10_63\",       \"brown\",          \"Base :  1.25M, ex=2000, 70ep, RAdam, iFAML, bs=128, r=1, b=1\",       \"<-\"   ],  \n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt \n",
    "import numpy as np\n",
    "import pickle\n",
    "\n",
    "# Per-task accuracy baselines (10 tasks, CIFAR-100) to plot against.\n",
    "x = range(10)\n",
    "icarl = [88.5, 78.39, 72.34, 66.94, 63.50, 60.23, 56.63, 54.17, 51.23, 49.10]\n",
    "ours = [88.8, 81.3, 77.266667, 72.575, 67.98, 66.0, 64.328571, 62.1, 59.744444, 57.36]\n",
    "\n",
    "chunks = 20\n",
    "\n",
    "\n",
    "def _fill_scores(z, z2, ps, start):\n",
    "    \"\"\"Best-effort fill of z/z2[start:10] via get_mata_score; failures leave 0.\"\"\"\n",
    "    for j in range(start, 10):\n",
    "        try:\n",
    "            z[j], z2[j] = get_mata_score(ps, j, chunks)\n",
    "        except Exception:\n",
    "            pass  # pickle for task j not produced yet -- keep the 0 placeholder\n",
    "\n",
    "\n",
    "plt.figure(figsize=(16,10))\n",
    "\n",
    "plt.plot(x, ours,  \"o-\", linewidth=2, c=\"orangered\", label=\"RPS net\")\n",
    "plt.plot(x, icarl, \"o-\", linewidth=2, c=\"steelblue\", label=\"iCaRL\")\n",
    "\n",
    "for i, p in enumerate(models_dic):\n",
    "    z = np.zeros_like(icarl)\n",
    "    z2 = np.zeros_like(icarl)\n",
    "    ps = \"models/\" + \"/\".join(p[0].split(\"/\")[1:])\n",
    "    try:\n",
    "        z = np.load(p[0]+\"/meta_\"+str(chunks)+\".npy\")\n",
    "        z2 = np.load(p[0]+\"/meta_task_\"+str(chunks)+\".npy\")\n",
    "        if(np.sum(z>0)<10):  # cached file exists but is incomplete\n",
    "            _fill_scores(z, z2, ps, int(np.sum(z>0)))\n",
    "            np.save(p[0]+\"/meta_\"+str(chunks)+\".npy\", z)\n",
    "            np.save(p[0]+\"/meta_task_\"+str(chunks)+\".npy\", z2)\n",
    "    except OSError:  # was bare `except:`; only catch missing/corrupt cache files\n",
    "        _fill_scores(z, z2, ps, 0)\n",
    "        np.save(p[0]+\"/meta_\"+str(chunks)+\".npy\", z)\n",
    "        np.save(p[0]+\"/meta_task_\"+str(chunks)+\".npy\", z2)\n",
    "\n",
    "    plt.plot(x, z, p[3], linewidth=1, c=p[1], label=p[2])\n",
    "    plt.plot(x, z2, \"--\", linewidth=1, c=p[1], label=p[2] + \"  task accuracy\")\n",
    "    print(\",\".join([str(i) for i in z]))\n",
    "\n",
    "# The `b=` keyword was removed from plt.grid in Matplotlib 3.6; pass it positionally.\n",
    "plt.grid(True, which='major', color='gray', linestyle='-', alpha=0.4)\n",
    "plt.grid(True, which='minor', color='gray', linestyle='--', alpha=0.1)\n",
    "plt.minorticks_on()\n",
    "\n",
    "plt.ylim([0,102])\n",
    "plt.legend(loc='lower left',  ncol=1, handleheight=1.0, labelspacing=0.05, prop={'size': 9, \"weight\":\"bold\"})\n",
    "\n",
    "plt.xlabel(\"Number of tasks\", fontweight='bold', fontsize=15)\n",
    "plt.ylabel(\"Accuracy\", fontweight='bold', fontsize=15)\n",
    "plt.title(\"CNN\", fontweight='bold', fontsize=17) \n",
    "plt.xticks(range(10))\n",
    "# plt.savefig(\"meta_10.pdf\")\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
