{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import copy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "models = []\n",
    "for e in range(5):\n",
    "    c_dict = torch.load(f\"/media/yunhao/DATA/Project/FCAT/causal_log/u{e}e0.t7\")\n",
    "    models.append(c_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def average_weights(w):\n",
    "    \"\"\"\n",
    "    Returns the average of the weights.\n",
    "    \"\"\"\n",
    "    w_avg = copy.deepcopy(w[0])\n",
    "    for key in w_avg.keys():\n",
    "        w_avg[key] = w_avg[key].to(\"cpu\")\n",
    "        for i in range(1, len(w)):\n",
    "            w[i][key] = w[i][key].to(\"cpu\")\n",
    "            w_avg[key] += w[i][key]\n",
    "        if 'num_batches_tracked' in key:\n",
    "            w_avg[key] = w_avg[key].true_divide(len(w))\n",
    "        else:\n",
    "            w_avg[key] = torch.div(w_avg[key], len(w))\n",
    "    return w_avg\n",
# Average the five loaded state dicts into a single reference dict.
c_avg = average_weights(models)
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "distance = []\n",
    "bn_distance = []\n",
    "for i in range(len(models)):\n",
    "    c_dict = models[i]\n",
    "    idx_d = []\n",
    "    idx_bn = []\n",
    "    for key in c_dict.keys():\n",
    "        if 'num_batches_tracked' in key:\n",
    "            idx_dict = c_dict[key].cpu().numpy().flatten()\n",
    "            avg_dict = c_avg[key].cpu().numpy().flatten()\n",
    "            distance_idx = np.sqrt(np.sum((idx_dict - avg_dict)**2))\n",
    "            idx_bn.append(distance_idx)\n",
    "        else:\n",
    "            idx_dict = c_dict[key].cpu().numpy().flatten()\n",
    "            avg_dict = c_avg[key].cpu().numpy().flatten()\n",
    "            distance_idx = np.sqrt(np.sum((idx_dict - avg_dict)**2))\n",
    "            idx_d.append(distance_idx)\n",
    "    distance.append(idx_d)\n",
    "    bn_distance.append(idx_bn)\n",
    "\n",
    "distance = np.array(distance)\n",
    "bn_distance = np.array(bn_distance)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "odict_keys(['encoder.0.conv.0.weight', 'encoder.0.conv.0.bias', 'encoder.0.conv.1.weight', 'encoder.0.conv.1.bias', 'encoder.0.conv.1.running_mean', 'encoder.0.conv.1.running_var', 'encoder.0.conv.1.num_batches_tracked', 'encoder.1.conv.0.weight', 'encoder.1.conv.0.bias', 'encoder.1.conv.1.weight', 'encoder.1.conv.1.bias', 'encoder.1.conv.1.running_mean', 'encoder.1.conv.1.running_var', 'encoder.1.conv.1.num_batches_tracked', 'encoder.2.conv.0.weight', 'encoder.2.conv.0.bias', 'encoder.2.conv.1.weight', 'encoder.2.conv.1.bias', 'encoder.2.conv.1.running_mean', 'encoder.2.conv.1.running_var', 'encoder.2.conv.1.num_batches_tracked', 'linear.0.weight', 'linear.0.bias', 'decoder.0.deconv.0.weight', 'decoder.0.deconv.0.bias', 'decoder.0.deconv.1.weight', 'decoder.0.deconv.1.bias', 'decoder.0.deconv.1.running_mean', 'decoder.0.deconv.1.running_var', 'decoder.0.deconv.1.num_batches_tracked', 'decoder.1.deconv.0.weight', 'decoder.1.deconv.0.bias', 'decoder.1.deconv.1.weight', 'decoder.1.deconv.1.bias', 'decoder.1.deconv.1.running_mean', 'decoder.1.deconv.1.running_var', 'decoder.1.deconv.1.num_batches_tracked', 'decoder.2.deconv.0.weight', 'decoder.2.deconv.0.bias', 'decoder.2.deconv.1.weight', 'decoder.2.deconv.1.bias', 'decoder.2.deconv.1.running_mean', 'decoder.2.deconv.1.running_var', 'decoder.2.deconv.1.num_batches_tracked'])"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
# Inspect the parameter/buffer names of the last loaded state dict.
c_dict.keys()
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "other_mean = np.mean(distance,axis=0)\n",
    "bn_mean = np.mean(bn_distance, axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "157.18488"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
# Total (summed over keys) mean distance for the non-counter weights.
np.sum(other_mean)
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "252.79999999999998"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
# Mean of the per-key mean distances for num_batches_tracked buffers.
# NOTE(review): the non-counter cell above uses np.sum while this uses
# np.mean — confirm the asymmetry is intentional.
np.mean(bn_mean)
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def distance_compute(epoch, path=\"/media/yunhao/DATA/Project/FCAT\"):\n",
    "    def average_weights(w):\n",
    "        \"\"\"\n",
    "        Returns the average of the weights.\n",
    "        \"\"\"\n",
    "        w_avg = copy.deepcopy(w[0])\n",
    "        for key in w_avg.keys():\n",
    "            w_avg[key] = w_avg[key].to(\"cpu\")\n",
    "            for i in range(1, len(w)):\n",
    "                w[i][key] = w[i][key].to(\"cpu\")\n",
    "                w_avg[key] += w[i][key]\n",
    "            if 'num_batches_tracked' in key:\n",
    "                w_avg[key] = w_avg[key].true_divide(len(w))\n",
    "            else:\n",
    "                w_avg[key] = torch.div(w_avg[key], len(w))\n",
    "        return w_avg\n",
    "    \n",
    "    \n",
    "    models = []\n",
    "    for u in range(5):\n",
    "        c_dict = torch.load(f\"{path}/causal_log/u{u}e{epoch}.t7\")\n",
    "        models.append(c_dict)\n",
    "    c_avg = average_weights(models)\n",
    "    \n",
    "    distance = []\n",
    "    bn_distance = []\n",
    "    for i in range(len(models)):\n",
    "        c_dict = models[i]\n",
    "        idx_d = []\n",
    "        idx_bn = []\n",
    "        for key in c_dict.keys():\n",
    "            if 'num_batches_tracked' in key:\n",
    "                idx_dict = c_dict[key].cpu().numpy().flatten()\n",
    "                avg_dict = c_avg[key].cpu().numpy().flatten()\n",
    "                distance_idx = np.sqrt(np.sum((idx_dict - avg_dict)**2))\n",
    "                idx_bn.append(distance_idx)\n",
    "            else:\n",
    "                idx_dict = c_dict[key].cpu().numpy().flatten()\n",
    "                avg_dict = c_avg[key].cpu().numpy().flatten()\n",
    "                distance_idx = np.sqrt(np.sum((idx_dict - avg_dict)**2))\n",
    "                idx_d.append(distance_idx)\n",
    "        distance.append(idx_d)\n",
    "        bn_distance.append(idx_bn)\n",
    "\n",
    "    distance = np.array(distance)\n",
    "    bn_distance = np.array(bn_distance)\n",
    "    other_mean = np.mean(distance,axis=0)\n",
    "    bn_mean = np.mean(bn_distance, axis=0)\n",
    "    return np.sum(other_mean), np.mean(bn_mean)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "others = []\n",
    "bns = []\n",
    "for epoch in range(150):\n",
    "    other, bn = distance_compute(epoch=epoch)\n",
    "    d = other + bn\n",
    "    others.append(other/d)\n",
    "    bns.append(bn/d)\n",
    "import pandas as pd\n",
    "res = pd.DataFrame({\n",
    "    'others':others,\n",
    "    'bns':bns\n",
    "})\n",
    "res.to_csv(\"distance.csv\",index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ffcv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
