{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c51e5438-dca6-4555-b953-e5247c1cdf1c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2024-03-07 00:49:02,902] [INFO] [real_accelerator.py:161:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n"
     ]
    }
   ],
   "source": [
    "\"\"\"\n",
    "Initialization: imports, CLIP model setup, and a FAISS index-saving helper.\n",
    "\"\"\"\n",
    "# Standard library.\n",
    "import os\n",
    "import io\n",
    "import re\n",
    "import ast\n",
    "import json\n",
    "\n",
    "# Third-party.\n",
    "import clip\n",
    "import torch\n",
    "import faiss\n",
    "import chromadb\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from PIL import Image\n",
    "from tqdm import tqdm\n",
    "from transformers import AutoModel, AutoTokenizer\n",
    "from transformers import CLIPProcessor, CLIPModel\n",
    "\n",
    "# NOTE(review): disables PIL's decompression-bomb guard so very large images\n",
    "# load without error -- only safe when the image sources are trusted.\n",
    "Image.MAX_IMAGE_PIXELS = None\n",
    "\n",
    "# Load the CLIP ViT-B/16 backbone and its image preprocessing transform.\n",
    "clip_model, preprocess = clip.load(\"ViT-B/16\", device=\"cuda\")\n",
    "\n",
    "def index_save(index_split, path):\n",
    "    \"\"\"Write a FAISS index to disk at `path`.\"\"\"\n",
    "    faiss.write_index(index_split, path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "4f5fac82-f18f-4740-9c12-507ff042475d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "600\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Process:: 100%|██████████| 600/600 [00:07<00:00, 77.69it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total number of indexes: 600\n",
      "Saving index:\n",
      "Done\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "\"\"\"\n",
    "Build the image memory: embed every database image with CLIP and add the\n",
    "features to a FAISS HNSW index (inner product over L2-normalized vectors,\n",
    "i.e. cosine similarity).\n",
    "\"\"\"\n",
    "file_path = \"/mnt/petrelfs/liuziyu/LLM_Memory/SimplyRetrieve/classification/New_test/database/cub200/cub200_database.txt\"\n",
    "with open(file_path, 'r') as file:\n",
    "    lines = file.readlines()\n",
    "# Each line is \"<image_path> <label>\"; only the path is needed here.\n",
    "image_paths = [line.split()[0] for line in lines]\n",
    "print(len(image_paths))\n",
    "\n",
    "### where the index will be saved\n",
    "index_img_save_path = \"/mnt/petrelfs/liuziyu/LLM_Memory/SimplyRetrieve/classification/New_test/database/cub200/cub200_database.index\"\n",
    "\n",
    "### build an image index: 512-dim features, HNSW graph with M=64 neighbours\n",
    "index_img = faiss.IndexHNSWFlat(512, 64, faiss.METRIC_INNER_PRODUCT)\n",
    "\n",
    "### embed the images\n",
    "embed_img = []\n",
    "with torch.no_grad():\n",
    "    for image_path in tqdm(image_paths, desc=\"Process:\"):\n",
    "        image = preprocess(Image.open(image_path)).unsqueeze(0).to(\"cuda\")\n",
    "        image_features = clip_model.encode_image(image)\n",
    "        # L2-normalize so inner product acts as cosine similarity.\n",
    "        image_features /= image_features.norm(dim=-1, keepdim=True)\n",
    "        embed_img.append(image_features.cpu())\n",
    "# Stack to (N, 512) and force float32: FAISS only accepts float32 input and\n",
    "# CLIP on CUDA emits float16.  vstack also keeps a 2-D shape when N == 1,\n",
    "# where the original squeeze() collapsed it to 1-D and would break add().\n",
    "embed_img = np.vstack([embed.numpy() for embed in embed_img]).astype(np.float32)\n",
    "index_img.add(embed_img)\n",
    "print(\"Total number of indexes:\", index_img.ntotal)\n",
    "\n",
    "### save the index\n",
    "print(\"Saving index:\")\n",
    "index_save(index_img, index_img_save_path)\n",
    "print(\"Done\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b71ee42-d58c-4e1a-9b42-ce766d9824d8",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Refine zero-shot predictions with kNN retrieval: for each test image, look\n",
    "up its nearest database images in the FAISS index and overwrite the stored\n",
    "'pred_class' with the retrieved neighbours' labels.\n",
    "\"\"\"\n",
    "TOP_K = 8  # number of nearest neighbours to retrieve per test image\n",
    "\n",
    "index_img_save_path = \"/mnt/petrelfs/liuziyu/LLM_Memory/SimplyRetrieve/classification/New_test/database/pets37/pets37_database.index\"\n",
    "trainset_file_path = \"/mnt/petrelfs/liuziyu/LLM_Memory/SimplyRetrieve/classification/New_test/database/pets37/pets37_database.txt\"\n",
    "predictions_save_path = \"/mnt/petrelfs/liuziyu/LLM_Memory/SimplyRetrieve/CLIP-Cls/output/ZeroshotCLIP_topk/vit_b16/oxford_pets/predictions_finer_knn.pth\"\n",
    "index = faiss.read_index(index_img_save_path)\n",
    "pth_file_path = '/mnt/petrelfs/liuziyu/LLM_Memory/SimplyRetrieve/CLIP-Cls/output/ZeroshotCLIP_topk/vit_b16/oxford_pets/predictions.pth'\n",
    "predictions = torch.load(pth_file_path)\n",
    "\n",
    "with open(trainset_file_path, 'r') as file:\n",
    "    lines = file.readlines()\n",
    "\n",
    "for prediction in tqdm(predictions, desc=\"Process:\"):\n",
    "    # Each entry maps test-image path -> result dict.  The original grabbed\n",
    "    # the last key via a loop (normally there is exactly one); keep that\n",
    "    # semantics explicitly.  The unused 'pred_class' read was dead code.\n",
    "    test_img_path = list(prediction.keys())[-1]\n",
    "\n",
    "    with torch.no_grad():\n",
    "        image = preprocess(Image.open(test_img_path)).unsqueeze(0).to(\"cuda\")\n",
    "        # (1, 512) CLIP feature, L2-normalized to match the index metric;\n",
    "        # cast to float32 because FAISS rejects CLIP's CUDA float16 output.\n",
    "        image_features = clip_model.encode_image(image)\n",
    "        image_features /= image_features.norm(dim=-1, keepdim=True)\n",
    "        image_features = image_features.cpu().numpy().astype(np.float32)\n",
    "        distance, index_result = index.search(image_features, TOP_K)\n",
    "\n",
    "    # Map retrieved row ids back to labels via the database file\n",
    "    # (\"<image_path> <label>\" per line).\n",
    "    labels = []\n",
    "    for index_number in index_result[0]:\n",
    "        _, label_str = lines[index_number].strip().split(' ', 1)\n",
    "        labels.append(int(label_str))\n",
    "    labels = torch.tensor(labels)\n",
    "\n",
    "    # Overwrite the stored prediction(s) with the retrieved labels.\n",
    "    for item in prediction.values():\n",
    "        item['pred_class'] = labels\n",
    "\n",
    "torch.save(predictions, predictions_save_path)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
