{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# All imports in one place, stdlib first, then third-party.\n",
    "# NOTE(review): numpy was previously imported three separate times in this\n",
    "# cell; deduplicated to a single import.\n",
    "import datetime\n",
    "import math\n",
    "import pickle as pkl\n",
    "import random\n",
    "import time\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from sklearn.metrics.pairwise import cosine_similarity\n",
    "from torch import nn\n",
    "from torch.nn import Module, Parameter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the pre-computed word-embedding matrix and move it onto the GPU.\n",
    "# NOTE(review): pickle.load executes arbitrary code on untrusted files —\n",
    "# only load pickles produced by this project.\n",
    "DEVICE = 'cuda:4'  # hardcoded GPU index from the original; adjust per machine\n",
    "with open('./Data/Processed/word_emb_tensor.pkl', 'rb') as f:\n",
    "    word_embed_tensor = pkl.load(f)\n",
    "# as_tensor avoids the copy-construct UserWarning that torch.tensor() raises\n",
    "# when handed an object that is already a tensor (seen in later cells).\n",
    "word_embed_tensor = torch.as_tensor(word_embed_tensor, device=DEVICE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Take the first two embedding rows back to the CPU as a NumPy array.\n",
    "toggle = word_embed_tensor[:2].cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the two-row sample for later inspection.\n",
    "with open('./Data/Processed/toggle.pkl', 'wb') as f:\n",
    "    pkl.dump(toggle, f, protocol=pkl.HIGHEST_PROTOCOL)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0, 1, 2, 3, 4, 5])"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch cell: sanity-check torch.cat on two 1-D tensors.\n",
    "# NOTE(review): only the last expression of a cell is displayed, so the\n",
    "# value of the first line is silently discarded — it looks like leftover\n",
    "# scratch and could be removed.\n",
    "word_embed_tensor.shape[0]\n",
    "torch.cat([torch.tensor([0, 1, 2]), torch.tensor([3, 4, 5])])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home2/Dlib_RecSys/anaconda3/envs/SR-GNN/lib/python3.7/site-packages/ipykernel_launcher.py:6: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
      "  \n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(torch.Size([105542]),\n",
       " tensor([1.0000, 0.9632, 0.9498,  ..., 0.9350, 0.8742, 0.9190], device='cuda:4'),\n",
       " torch.Size([100]),\n",
       " tensor([     0,  71173,  72495, 101344,  30902,  10109,  94767,  63573,  40478,\n",
       "          96804,   1698,  80935,  36088,  35623,  47254,  47253,  62638,  72486,\n",
       "          64657,  15147,  15137,  87495,  77177, 102303,  62641,  89179, 103220,\n",
       "          32316,  76473,  88971,  83527,  60829,  91226,  62634,  86782,  77092,\n",
       "          87486,   5317,  90082,  69638,  71518,  95402,  83574,  87546,  58658,\n",
       "          10944,  44866,  99615,  94970,  90198, 101064,  24207,   3505,  44962,\n",
       "           3510,  71011,  99189,  78285,  72597,   8803,   8544, 102963,  32656,\n",
       "          60349, 105099,  72497,  23874,    115,  71962,  87493,  19701,  17617,\n",
       "          39395, 104309,  95000,  65505,  14904,  45920,  42394,  76305,  14066,\n",
       "          57269,  71903,  14076,  30921,  14575,  14569,  14132,   6144,   4215,\n",
       "          40406,  98446,  44871, 103616,  14062,  26183,  76834,  29007,    290,\n",
       "          12789], device='cuda:4'))"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Cosine similarity of row 0 against every row, computed in two halves\n",
    "# (presumably to bound peak GPU memory — confirm), then the 100 most\n",
    "# similar row indices.\n",
    "n = word_embed_tensor.shape[0]\n",
    "cos_sim = nn.CosineSimilarity(dim=1, eps=1e-6)\n",
    "dist0 = cos_sim(word_embed_tensor[0:1], word_embed_tensor[0:n//2])\n",
    "dist1 = cos_sim(word_embed_tensor[0:1], word_embed_tensor[n//2:])\n",
    "dist = torch.cat([dist0, dist1])\n",
    "# dist is already a tensor; the original torch.tensor(dist) wrap made a\n",
    "# redundant copy and triggered the UserWarning visible in the output.\n",
    "ind = torch.topk(dist, 100)[1]\n",
    "dist.shape, dist, ind.shape, ind"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[     0,  71173,  72495, 101344,  30902,  10109,  94767,  63573,  40478,\n",
       "          96804,   1698,  80935,  36088,  35623,  47254,  47253,  62638,  72486,\n",
       "          64657,  15147,  15137,  87495,  77177, 102303,  62641,  89179, 103220,\n",
       "          32316,  76473,  88971,  83527,  60829,  91226,  62634,  86782,  77092,\n",
       "          87486,   5317,  90082,  69638,  71518,  95402,  83574,  87546,  58658,\n",
       "          10944,  44866,  99615,  94970,  90198, 101064,  24207,   3505,  44962,\n",
       "           3510,  71011,  99189,  78285,  72597,   8803,   8544, 102963,  32656,\n",
       "          60349, 105099,  72497,  23874,    115,  71962,  87493,  19701,  17617,\n",
       "          39395, 104309,  95000,  65505,  14904,  45920,  42394,  76305,  14066,\n",
       "          57269,  71903,  14076,  30921,  14575,  14569,  14132,   6144,   4215,\n",
       "          40406,  98446,  44871, 103616,  14062,  26183,  76834,  29007,    290,\n",
       "          12789],\n",
       "        [     0,  71173,  72495, 101344,  30902,  10109,  94767,  63573,  40478,\n",
       "          96804,   1698,  80935,  36088,  35623,  47254,  47253,  62638,  72486,\n",
       "          64657,  15147,  15137,  87495,  77177, 102303,  62641,  89179, 103220,\n",
       "          32316,  76473,  88971,  83527,  60829,  91226,  62634,  86782,  77092,\n",
       "          87486,   5317,  90082,  69638,  71518,  95402,  83574,  87546,  58658,\n",
       "          10944,  44866,  99615,  94970,  90198, 101064,  24207,   3505,  44962,\n",
       "           3510,  71011,  99189,  78285,  72597,   8803,   8544, 102963,  32656,\n",
       "          60349, 105099,  72497,  23874,    115,  71962,  87493,  19701,  17617,\n",
       "          39395, 104309,  95000,  65505,  14904,  45920,  42394,  76305,  14066,\n",
       "          57269,  71903,  14076,  30921,  14575,  14569,  14132,   6144,   4215,\n",
       "          40406,  98446,  44871, 103616,  14062,  26183,  76834,  29007,    290,\n",
       "          12789]], device='cuda:4')"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.stack([ind, ind])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home2/Dlib_RecSys/anaconda3/envs/SR-GNN/lib/python3.7/site-packages/ipykernel_launcher.py:9: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
      "  if __name__ == '__main__':\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1000\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_21532/3558142000.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      4\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m%\u001b[0m\u001b[0;36m1000\u001b[0m\u001b[0;34m==\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m     \u001b[0mdist0\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcos_sim\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mword_embed_tensor\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mword_embed_tensor\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mn\u001b[0m\u001b[0;34m//\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      7\u001b[0m     \u001b[0mdist1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcos_sim\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mword_embed_tensor\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mword_embed_tensor\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mn\u001b[0m\u001b[0;34m//\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      8\u001b[0m     \u001b[0mdist\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mdist0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdist1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/SR-GNN/lib/python3.7/site-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m   1049\u001b[0m         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[1;32m   1050\u001b[0m                 or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1051\u001b[0;31m             \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1052\u001b[0m         \u001b[0;31m# Do not call functions when jit is used\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1053\u001b[0m         \u001b[0mfull_backward_hooks\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/SR-GNN/lib/python3.7/site-packages/torch/nn/modules/distance.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, x1, x2)\u001b[0m\n\u001b[1;32m     73\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     74\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mx1\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mx2\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 75\u001b[0;31m         \u001b[0;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcosine_similarity\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mx2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0meps\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# For every row, collect the indices of its 100 most cosine-similar rows.\n",
    "# O(n^2) overall; with n ≈ 105k the original run was interrupted — consider\n",
    "# batching many query rows per iteration (normalized matmul) instead of one.\n",
    "topk_index = []\n",
    "n = word_embed_tensor.shape[0]\n",
    "cos_sim = nn.CosineSimilarity(dim=1, eps=1e-6)\n",
    "for i in range(n):\n",
    "    if i % 1000 == 0: print(i)  # coarse progress indicator\n",
    "    # similarities computed in two halves, as in the single-row cell\n",
    "    dist0 = cos_sim(word_embed_tensor[i:i+1], word_embed_tensor[0:n//2])\n",
    "    dist1 = cos_sim(word_embed_tensor[i:i+1], word_embed_tensor[n//2:])\n",
    "    dist = torch.cat([dist0, dist1])\n",
    "    # dist is already a tensor; the original torch.tensor(dist) wrap copied\n",
    "    # it on every iteration and raised a per-iteration UserWarning.\n",
    "    ind = torch.topk(dist, 100)[1]\n",
    "    topk_index.append(ind)\n",
    "topk_index = torch.stack(topk_index)\n",
    "topk_index.shape, topk_index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[     0,  71173,  72495, 101344,  30902,  10109,  94767,  63573,\n",
       "         40478,  96804,   1698,  80935,  36088,  35623,  47253,  47254,\n",
       "         62638,  72486,  64657,  15147,  15137,  87495,  77177, 102303,\n",
       "         62641,  89179, 103220,  32316,  76473,  88971,  83527,  60829,\n",
       "         91226,  62634,  86782,  77092,  87486,   5317,  90082,  69638,\n",
       "         71518,  95402,  83574,  87546,  58658,  10944,  44866,  99615,\n",
       "         94970,  90198, 101064,  24207,   3505,  44962,   3510,  71011,\n",
       "         99189,  78285,  72597,   8803,   8544, 102963,  32656,  60349,\n",
       "        105099,  72497,  23874,    115,  71962,  87493,  19701,  17617,\n",
       "         39395, 104309,  95000,  65505,  14904,  45920,  42394,  76305,\n",
       "         14066,  57269,  71903,  14076,  30921,  14569,  14575,  14132,\n",
       "          6144,   4215,  40406,  98446,  44871, 103616,  14062,  26183,\n",
       "         76834,  29007,    290,  12789],\n",
       "       [     0,  71173,  72495, 101344,  30902,  10109,  94767,  63573,\n",
       "         40478,  96804,   1698,  80935,  36088,  35623,  47253,  47254,\n",
       "         62638,  72486,  64657,  15147,  15137,  87495,  77177, 102303,\n",
       "         62641,  89179, 103220,  32316,  76473,  88971,  83527,  60829,\n",
       "         91226,  62634,  86782,  77092,  87486,   5317,  90082,  69638,\n",
       "         71518,  95402,  83574,  87546,  58658,  10944,  44866,  99615,\n",
       "         94970,  90198, 101064,  24207,   3505,  44962,   3510,  71011,\n",
       "         99189,  78285,  72597,   8803,   8544, 102963,  32656,  60349,\n",
       "        105099,  72497,  23874,    115,  71962,  87493,  19701,  17617,\n",
       "         39395, 104309,  95000,  65505,  14904,  45920,  42394,  76305,\n",
       "         14066,  57269,  71903,  14076,  30921,  14569,  14575,  14132,\n",
       "          6144,   4215,  40406,  98446,  44871, 103616,  14062,  26183,\n",
       "         76834,  29007,    290,  12789]])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch cell: concatenate the top-k index array with itself.\n",
    "# NOTE(review): this cell's execution count (10) predates the cell that\n",
    "# defined `ind` (29), so the stored output is stale. Also, if `ind` is a\n",
    "# CUDA tensor this call would need ind.cpu() first — confirm on a fresh\n",
    "# Restart & Run All.\n",
    "np.concatenate([ind, ind])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "7c2e65e64076883662e9fbb467097aa6d81839e6b77012bdd0b6d8c4fbfb9623"
  },
  "kernelspec": {
   "display_name": "Python 3.7.11 ('SR-GNN')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.11"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
