{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import yaml\n",
    "from models import CLIPWrapper\n",
    "import torch\n",
    "import torchvision.transforms as T\n",
    "from PIL import Image\n",
    "import clip\n",
    "import os\n",
    "\n",
    "# Constants: checkpoint path, test-data locations, and model selection.\n",
    "CHECKPOINT = 'lightning_logs/version_7/checkpoints/epoch=15-step=17823.ckpt'\n",
    "TEST_IMG_DIR = './train-clip4/images'\n",
    "LABELS_FILE = './train-clip4/clean_descriptions.txt'\n",
    "MODEL_NAME = 'RN50'\n",
    "DEVICE = 'cuda'  # NOTE(review): assumes a CUDA GPU is available -- confirm before running on a CPU-only host\n",
    "# Pick the YAML config matching the backbone family (ViT vs. ResNet).\n",
    "config_dir = 'models/configs/ViT.yaml' if 'ViT' in MODEL_NAME else 'models/configs/RN.yaml'\n",
    "\n",
    "# Load the configuration section for MODEL_NAME from the YAML file.\n",
    "with open(config_dir) as fin:\n",
    "    config = yaml.safe_load(fin)[MODEL_NAME]\n",
    "\n",
    "# Restore the wrapped CLIP model from the checkpoint and move it to DEVICE.\n",
    "model = CLIPWrapper.load_from_checkpoint(CHECKPOINT, model_name=MODEL_NAME, config=config, minibatch_size=1).model.to(\n",
    "    DEVICE)\n",
    "\n",
    "\n",
    "# Ensure images are 3-channel RGB before tensor conversion (CLIP expects RGB input).\n",
    "def fix_img(img):\n",
    "    return img.convert('RGB') if img.mode != 'RGB' else img\n",
    "\n",
    "\n",
    "# Evaluation-time image preprocessing pipeline.\n",
    "# BUG FIX: the original used RandomResizedCrop here, which makes the\n",
    "# zero-shot accuracy computed below non-deterministic from run to run;\n",
    "# deterministic Resize + CenterCrop matches standard CLIP evaluation\n",
    "# preprocessing. Normalization constants are the published CLIP values.\n",
    "image_transform = T.Compose([\n",
    "    T.Lambda(fix_img),\n",
    "    T.Resize(224),\n",
    "    T.CenterCrop(224),\n",
    "    T.ToTensor(),\n",
    "    T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n",
    "])\n",
    "\n",
    "# Read labels from file and build a mapping: image filename -> label text.\n",
    "# Each line is expected to look like \"<filename> <label text...>\".\n",
    "with open(LABELS_FILE, 'r') as file:\n",
    "    label_lines = file.readlines()\n",
    "\n",
    "image_labels = {}\n",
    "\n",
    "for line in label_lines:\n",
    "    parts = line.strip().split(' ', 1)\n",
    "    if len(parts) == 2:\n",
    "        image_filename, label = parts\n",
    "        # BUG FIX: the original stored image_labels[label] = image_filename,\n",
    "        # so the `img_file in image_labels` membership test below could never\n",
    "        # match a filename and no image was ever evaluated (then the accuracy\n",
    "        # computation divided by zero). Map filename -> label instead.\n",
    "        image_labels[image_filename] = label\n",
    "\n",
    "# Unique candidate label texts for zero-shot classification (sorted for a\n",
    "# deterministic ordering of the text prompts).\n",
    "unique_labels = sorted(set(image_labels.values()))\n",
    "\n",
    "# Initialize variables for accuracy calculation\n",
    "correct_predictions = 0\n",
    "total_predictions = 0\n",
    "\n",
    "# Model prediction and accuracy calculation\n",
    "with torch.no_grad():\n",
    "    # The candidate texts are identical for every image, so encode them once\n",
    "    # outside the loop instead of re-encoding per image.\n",
    "    text_emb = clip.tokenize(unique_labels, truncate=True).to(DEVICE)\n",
    "    text_enc = model.encode_text(text_emb)\n",
    "    text_enc /= text_enc.norm(dim=-1, keepdim=True)\n",
    "\n",
    "    for img_file in os.listdir(TEST_IMG_DIR):\n",
    "        # Only evaluate images that have a ground-truth label (zero-shot check).\n",
    "        if img_file not in image_labels:\n",
    "            continue\n",
    "        img_path = os.path.join(TEST_IMG_DIR, img_file)\n",
    "\n",
    "        img_emb = image_transform(Image.open(img_path)).unsqueeze(0).to(DEVICE)\n",
    "        img_enc = model.encode_image(img_emb)\n",
    "        img_enc /= img_enc.norm(dim=-1, keepdim=True)\n",
    "\n",
    "        # Cosine similarities -> softmax over the candidate labels.\n",
    "        similarity = (100.0 * img_enc @ text_enc.T).softmax(dim=-1)\n",
    "        values, indices = similarity[0].topk(1)\n",
    "\n",
    "        # Get the predicted label and probability\n",
    "        predicted_label = unique_labels[indices[0]]\n",
    "        probability = values[0].item()\n",
    "        # BUG FIX: the original compared/printed image_labels[label], where\n",
    "        # `label` was a stale loop variable from the parsing loop above; use\n",
    "        # the current image's own ground-truth label.\n",
    "        true_label = image_labels[img_file]\n",
    "\n",
    "        # Print the result\n",
    "        print(f\"\\n Top prediction for {img_file}:\\n\")\n",
    "        print(f\"Correct label: {true_label}\")\n",
    "        print(f\"Predicted label: {predicted_label}\")\n",
    "        print(f\"Probability: {probability:.2f}%\")\n",
    "\n",
    "        # Check if the prediction is correct\n",
    "        if predicted_label == true_label:\n",
    "            correct_predictions += 1\n",
    "        total_predictions += 1\n",
    "\n",
    "# Calculate and print the average accuracy (guard against an empty test set).\n",
    "if total_predictions > 0:\n",
    "    accuracy = (correct_predictions / total_predictions) * 100\n",
    "    print(f\"\\nAverage accuracy: {accuracy:.2f}%\")\n",
    "else:\n",
    "    print(\"\\nNo labelled test images found; accuracy not computed.\")\n",
    "\n",
    "\n",
    "image_labels = {}  \n",
    "  \n",
    "# 假设你有一个包含图像文件名和对应标签的列表  \n",
    "image_filenames_and_labels = [('image1.jpg', 'cat'), ('image2.jpg', 'dog'), ('image3.jpg', 'cat')]  \n",
    "  \n",
    "# 遍历这个列表，将文件名添加到对应标签的列表中  \n",
    "for filename, label in image_filenames_and_labels:  \n",
    "    if label in image_labels:  \n",
    "        # 如果标签已经存在，将文件名添加到列表中  \n",
    "        image_labels[label].append(filename)  \n",
    "    else:  \n",
    "        # 如果标签不存在，创建一个新列表  \n",
    "        image_labels[label] = [filename]  \n",
    "  \n",
    "# 打印结果  \n",
    "print(image_labels)\n",
    "\n",
    "# Check if the prediction is correct.\n",
    "# `predicted_label`, `label`, `img_file`, and `probability` are leftovers\n",
    "# from the evaluation loop earlier in this cell; `image_labels` now maps\n",
    "# label -> list of filenames (built just above).\n",
    "# Use .get with a default so an unseen label cannot raise KeyError.\n",
    "if predicted_label in image_labels.get(label, []):\n",
    "    correct_predictions += 1\n",
    "total_predictions += 1\n",
    "\n",
    "# Print the result for the last evaluated image.\n",
    "# BUG FIX: these prints were indented under nothing in the original,\n",
    "# which raised an IndentationError and prevented the whole cell from\n",
    "# running; they are dedented to top level here.\n",
    "print(f\"\\n prediction for {img_file}:\\n\")\n",
    "print(f\"Predicted label: {predicted_label}\")\n",
    "print(f\"label img_file: {image_labels.get(predicted_label, [])}\")\n",
    "print(f\"Probability: {probability:.2f}%\")\n",
    "\n",
    "\n",
    "import yaml\n",
    "from models import CLIPWrapper\n",
    "import torch\n",
    "import torchvision.transforms as T\n",
    "from PIL import Image\n",
    "import clip\n",
    "import os\n",
    "# Dataset note (from the author): train-clip holds training words,\n",
    "# train-clip2 validation words and test words (the same directory is\n",
    "# listed twice in the original note -- presumably one should be train-clip3).\n",
    "CHECKPOINT = 'lightning_logs/version_7/checkpoints/epoch=15-step=17823.ckpt'  # Path to the trained Lightning checkpoint\n",
    "TEST_IMG_DIR = './train-clip2/images'  # Folder containing the test images\n",
    "LABELS_FILE = './train-clip2/text.txt'  # Text file containing the labels\n",
    "MODEL_NAME = 'RN50'\n",
    "DEVICE = 'cuda'  # NOTE(review): assumes a CUDA GPU is available\n",
    "# Pick the YAML config matching the backbone family (ViT vs. ResNet).\n",
    "config_dir = 'models/configs/ViT.yaml' if 'ViT' in MODEL_NAME else 'models/configs/RN.yaml'\n",
    "\n",
    "# Load the configuration section for MODEL_NAME from the YAML file.\n",
    "with open(config_dir) as fin:\n",
    "    config = yaml.safe_load(fin)[MODEL_NAME]\n",
    "    \n",
    "# Restore the wrapped CLIP model from the checkpoint and move it to DEVICE.\n",
    "model = CLIPWrapper.load_from_checkpoint(CHECKPOINT, model_name=MODEL_NAME, config=config, minibatch_size=1).model.to(DEVICE)\n",
    "\n",
    "def fix_img(img):\n",
    "    return img.convert('RGB') if img.mode != 'RGB' else img\n",
    "\n",
    "# Evaluation-time image preprocessing pipeline.\n",
    "# BUG FIX: the original used RandomResizedCrop, which makes the reported\n",
    "# accuracy vary from run to run; Resize + CenterCrop is the standard\n",
    "# deterministic CLIP evaluation preprocessing.\n",
    "image_transform = T.Compose([\n",
    "    T.Lambda(fix_img),\n",
    "    T.Resize(224),\n",
    "    T.CenterCrop(224),\n",
    "    T.ToTensor(),\n",
    "    T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n",
    "])\n",
    "\n",
    "# Read the label file: each line is expected to be \"<filename> <label text>\".\n",
    "with open(LABELS_FILE, 'r') as file:\n",
    "    labels = file.readlines()\n",
    "\n",
    "image_labels = {}\n",
    "\n",
    "# BUG FIX: the original iterated over `label_lines`, a leftover variable\n",
    "# from an earlier cell that is undefined on a fresh kernel; iterate over\n",
    "# the lines just read instead.\n",
    "for line in labels:\n",
    "    parts = line.strip().split(' ', 1)\n",
    "    if len(parts) == 2:\n",
    "        image_filename, label = parts\n",
    "        # Map label text -> image filename (used in the lookup print below).\n",
    "        image_labels[label] = image_filename\n",
    "\n",
    "# Extract unique labels\n",
    "unique_labels = list(set(image_labels))\n",
    "\n",
    "# Remove newline characters from the labels\n",
    "labels = [label.strip() for label in labels]\n",
    "\n",
    "# Initialize variables for accuracy calculation\n",
    "correct_predictions = 0\n",
    "total_predictions = 0\n",
    "\n",
    "with torch.no_grad():\n",
    "    for img_file in os.listdir(TEST_IMG_DIR):\n",
    "        img_path = os.path.join(TEST_IMG_DIR, img_file)\n",
    "        img_emb = image_transform(Image.open(img_path)).unsqueeze(0).to(DEVICE)\n",
    "        img_enc = model.encode_image(img_emb)\n",
    "        text_emb = clip.tokenize(labels, truncate=True).to(DEVICE)\n",
    "        text_enc = model.encode_text(text_emb)\n",
    "\n",
    "        img_enc /= img_enc.norm(dim=-1, keepdim=True)\n",
    "        text_enc /= text_enc.norm(dim=-1, keepdim=True)\n",
    "        similarity = (100.0 * img_enc @ text_enc.T).softmax(dim=-1)\n",
    "        values, indices = similarity[0].topk(1)\n",
    "\n",
    "        # Get the predicted label and probability\n",
    "        predicted_label = labels[indices[0]]\n",
    "        probability = values[0].item()\n",
    "\n",
    "        # BUG FIX: these prints were dedented out of the loop in the\n",
    "        # original, so only the last image's prediction was reported;\n",
    "        # they belong inside the per-image loop.\n",
    "        print(f\"\\nTop prediction for {img_file}:\\n\")\n",
    "        print(f\"Predicted label: {predicted_label}\")\n",
    "        # Guarded lookup: `predicted_label` is a whole stripped line, which\n",
    "        # may not be a key of `image_labels` (keys are the label part only),\n",
    "        # so a plain [] index could raise KeyError.\n",
    "        print(f\"label img_file: {image_labels.get(predicted_label, 'unknown')}\")\n",
    "        print(f\"Probability: {probability:.2f}%\")\n"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
