{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "70429337",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import time  # NOTE(review): appears unused in this notebook\n",
    "import numpy as np\n",
    "import sys\n",
    "from torch.utils.data import DataLoader, Subset  # NOTE(review): Subset appears unused\n",
    "from sklearn.model_selection import train_test_split  # NOTE(review): unused; the split comes from train_test_split.txt\n",
    "\n",
    "# Make the project root importable (the notebook lives one directory below it)\n",
    "# so the src package resolves on a fresh kernel.\n",
    "notebook_dir = os.getcwd()\n",
    "project_root_path = os.path.dirname(notebook_dir)\n",
    "sys.path.insert(0, project_root_path)\n",
    "\n",
    "from src.config import PROJECT_ROOT\n",
    "\n",
    "from src.utils import get_filename_to_id_mapping\n",
    "from src import ImageConceptDataset"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "beaefdc7",
   "metadata": {},
   "source": [
    "## 1. Transform Images to Tensors\n",
    "Convert each image to a tensor of shape `(3, 299, 299)`.\n",
    "All tensors are kept in a Python list rather than stacked into a single tensor:\n",
    "- tensors and NumPy arrays require a single, contiguous block of memory\n",
    "- stacking all of our image tensors in RAM would require more than 12 GB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ffab0a4f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 11788 images.\n",
      "Processing in 369 batches of size 32 (for progress reporting)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Processing batches: 100%|██████████| 369/369 [01:05<00:00,  5.60it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Finished processing.\n",
      "Successfully transformed: 11788 images.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# LOAD AND TRANSFORM IMAGES\n",
    "# Reads every image listed in images.txt under input_dir and converts it to a\n",
    "# (3, 299, 299) tensor; returns the tensors plus their image paths.\n",
    "from src.preprocessing.image_processing import load_and_transform_images\n",
    "\n",
    "\n",
    "input_dir = os.path.join(PROJECT_ROOT, 'images', 'CUB')\n",
    "resol = 299  # target side length -- presumably Inception-style input size, confirm\n",
    "training = True  # NOTE(review): presumably enables train-time transforms -- verify\n",
    "images_file = os.path.join(PROJECT_ROOT, 'data', 'CUB', 'images.txt')\n",
    "\n",
    "image_tensors, image_paths = load_and_transform_images(input_dir, images_file, resol, training, batch_size=32, verbose=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f4a2b8f6",
   "metadata": {},
   "source": [
    "## 2. Generate concept label and image label matrices"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "289158f2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 11788 unique images.\n",
      "Found 312 unique concepts.\n",
      "Generated concept matrix with shape: (11788, 312)\n"
     ]
    }
   ],
   "source": [
    "# CREATE CONCEPT LABELS MATRIX\n",
    "# Encodes per-image concept annotations into an image-by-concept label matrix\n",
    "# plus an uncertainty matrix of the same shape (used element-wise below).\n",
    "from src.preprocessing.CUB.data_encoding import encode_image_concepts\n",
    "\n",
    "\n",
    "concept_labels_file = os.path.join(PROJECT_ROOT, 'data', 'CUB', 'image_concept_labels.txt')\n",
    "\n",
    "concept_labels, uncertainty_matrix = encode_image_concepts(concept_labels_file, verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "18cf3d80",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of unique concept vectors: 11757\n"
     ]
    }
   ],
   "source": [
    "# How many distinct concept annotation vectors exist across all images?\n",
    "num_unique_concept_vectors = len(np.unique(concept_labels, axis=0))\n",
    "print(f\"Number of unique concept vectors: {num_unique_concept_vectors}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "b3c1461a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 200 classes.\n",
      "Found labels for 11788 images.\n",
      "Generated one-hot matrix with shape: (11788, 200)\n"
     ]
    }
   ],
   "source": [
    "# CREATE IMAGE LABELS MATRIX\n",
    "# One-hot encodes each image class label into an image-by-class matrix.\n",
    "from src.preprocessing.CUB.data_encoding import one_hot_encode_labels\n",
    "\n",
    "\n",
    "labels_file = os.path.join(PROJECT_ROOT, 'data', 'CUB', 'image_class_labels.txt')\n",
    "classes_file = os.path.join(PROJECT_ROOT, 'data', 'CUB', 'classes.txt')\n",
    "\n",
    "image_labels = one_hot_encode_labels(labels_file, classes_file, verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e14842ee",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Percentage of instances with uncertainty=1 and concept_label=0 0.10737397005211732\n"
     ]
    }
   ],
   "source": [
    "# Fraction of (image, concept) entries labelled 0 while flagged with uncertainty 1.\n",
    "fraction_uncertain_negative = np.mean((uncertainty_matrix == 1) & (concept_labels == 0))\n",
    "print(\"Percentage of instances with uncertainty=1 and concept_label=0\", fraction_uncertain_negative)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b3cebe0c",
   "metadata": {},
   "source": [
    "## 3. (Optional) Get image_id->filename mapping.\n",
    "Allows us to check that tensors and label matrices have the same order."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "7ab54760",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # GET IMAGE ID TO IMAGE FILENAME MAPPING\n",
    "# images_file = os.path.join(PROJECT_ROOT, 'data', 'CUB', 'images.txt')\n",
    "# image_id_mapping = get_filename_to_id_mapping(images_file, reverse=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "96a941d4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# i = 4242\n",
    "# print(f'Filename of image {i}: \\n\\t{image_id_mapping[i]}')\n",
    "# print(f\"Image {i} has concepts: \\n\\t{concept_labels[i]}\")\n",
    "\n",
    "# image_idx = image_paths.index(image_id_mapping[i])\n",
    "# tensor = image_tensors[image_paths.index(image_id_mapping[i])]\n",
    "\n",
    "# print(f\"Tensor index of image {i}: \\n\\t{image_idx}\")\n",
    "# print(f\"Shape of the first tensor: \\n\\t{tensor.shape}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "1ab9a18f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# i=10\n",
    "\n",
    "# image_name = image_paths[i]\n",
    "# tensor = image_tensors[i]\n",
    "\n",
    "# print(f\"Tensor index {i} has filename: \\n\\t{image_name}\")\n",
    "\n",
    "# print(f\"Filename {image_name} has index: \\n\\t{list(image_id_mapping.values()).index(image_name)}\")\n",
    "# # print(concept_labels[i])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8f4e7af5",
   "metadata": {},
   "source": [
    "## 4. Create Train Test Splits using `train_test_split.txt`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "cd789f78",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Split complete: 5994 train images, 5794 test images.\n",
      "Train set size: 5994 tensors, 5994 concepts, 5994 labels\n",
      "Test set size:  5794 tensors, 5794 concepts, 5794 labels\n"
     ]
    }
   ],
   "source": [
    "# CREATE TRAIN TEST SPLIT USING TXT FILE\n",
    "# split_datasets partitions the label matrices, uncertainty matrix and tensor\n",
    "# list according to the official train_test_split.txt file.\n",
    "from src.preprocessing.CUB.split_train_test import split_datasets\n",
    "\n",
    "\n",
    "split_file = os.path.join(PROJECT_ROOT, 'data', 'CUB', 'train_test_split.txt')\n",
    "\n",
    "split_data = split_datasets(split_file, concept_labels, image_labels, uncertainty_matrix, image_tensors)\n",
    "\n",
    "train_concept_labels = split_data['train_concepts']\n",
    "test_concept_labels = split_data['test_concepts']\n",
    "\n",
    "train_img_labels = split_data['train_img_labels']\n",
    "test_img_labels = split_data['test_img_labels']\n",
    "\n",
    "# Only the train-side uncertainty is used below (class-level concept aggregation).\n",
    "train_uncertainty = split_data['train_uncertainty']\n",
    "\n",
    "train_tensors = split_data['train_tensors']\n",
    "test_tensors = split_data['test_tensors']\n",
    "\n",
    "# Sanity check: tensor, concept and label counts must agree within each split.\n",
    "print(f\"Train set size: {len(train_tensors)} tensors, {train_concept_labels.shape[0]} concepts, {train_img_labels.shape[0]} labels\")\n",
    "print(f\"Test set size:  {len(test_tensors)} tensors, {test_concept_labels.shape[0]} concepts, {test_img_labels.shape[0]} labels\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "f8d9d84b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# concept processing\n",
    "# Aggregate instance-level concept labels into per-class concept vectors,\n",
    "# using uncertainty scores from the TRAIN split only (presumably to avoid\n",
    "# leaking test annotations -- confirm in compute_class_level_concepts).\n",
    "from src.preprocessing.concept_processing import compute_class_level_concepts\n",
    "\n",
    "\n",
    "class_level_concepts = compute_class_level_concepts(train_concept_labels, train_uncertainty, train_img_labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "9f154129-300e-405c-8f4c-fef5a2653f77",
   "metadata": {},
   "outputs": [],
   "source": [
    "# apply class-level concepts to each instance\n",
    "from src.config import CUB_CONFIG\n",
    "from src.preprocessing.concept_processing import apply_class_concepts_to_instances\n",
    "\n",
    "# Feature flag: when True, instance concept labels in BOTH splits are replaced\n",
    "# with their class-level vectors. Left disabled for this run.\n",
    "class_concepts = False\n",
    "if class_concepts:\n",
    "    train_concept_labels, test_concept_labels = apply_class_concepts_to_instances(class_level_concepts, CUB_CONFIG, train_img_labels, train_concept_labels, test_img_labels, test_concept_labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "ead53294-e52d-4f7d-834d-3d041bc38e6a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from src.preprocessing.concept_processing import select_common_concepts\n",
    "\n",
    "\n",
    "# Keep only concept columns shared by at least min_class_count classes.\n",
    "# NOTE(review): this reassigns train/test_concept_labels in place, so re-running\n",
    "# just this cell slices an already-sliced matrix -- restart and run all instead.\n",
    "common_concept_indices = select_common_concepts(class_level_concepts, min_class_count=10)\n",
    "train_concept_labels = train_concept_labels[:, common_concept_indices]\n",
    "test_concept_labels = test_concept_labels[:, common_concept_indices]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bffb25c7-fabc-4df9-9089-996fd6b662d9",
   "metadata": {},
   "source": [
    "## Investigate Instance Differences"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4e5552d1-98e6-4a1e-ab92-ee024f6a264d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collapse the one-hot label matrices into integer class ids (0-based).\n",
    "Y_train = train_img_labels.argmax(axis=1)\n",
    "Y_test = test_img_labels.argmax(axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5ad51475-7a05-41c9-b0dd-9258f015c2f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"---TRAIN---\")\n",
    "threshold = 0.9\n",
    "\n",
    "# Bucket train-instance indices by class id.\n",
    "indices_by_label = {}\n",
    "for idx, label in enumerate(Y_train):\n",
    "    indices_by_label.setdefault(label, []).append(idx)\n",
    "\n",
    "# For every within-class pair, count concept entries whose absolute\n",
    "# difference exceeds the threshold (concepts that disagree).\n",
    "diff_concepts_by_label = {}\n",
    "for label, members in indices_by_label.items():\n",
    "    disagreements = 0\n",
    "    for a in range(len(members)):\n",
    "        for b in range(a + 1, len(members)):\n",
    "            delta = np.abs(train_concept_labels[members[a]] - train_concept_labels[members[b]])\n",
    "            disagreements += np.sum(delta > threshold)\n",
    "    diff_concepts_by_label[label] = disagreements\n",
    "\n",
    "total_diff_concepts = sum(diff_concepts_by_label.values())\n",
    "print(f\"There are {total_diff_concepts} different concepts in total.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "72cbde8d-e005-46cc-9025-2783d8c8596f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Spot-check: compare class-0 image 0 against every other class-0 image.\n",
    "# Relies on `threshold` defined in the previous cell.\n",
    "# NOTE(review): unlike the pairwise cells, this uses a signed difference (no\n",
    "# np.abs), so it only counts concepts present in image 0 but absent in image i\n",
    "# -- confirm whether the one-directional count is intentional.\n",
    "difference_in_class = 0\n",
    "for i, y in enumerate(Y_train):\n",
    "    if y == 0:\n",
    "        print(f\" Image 0 vs Image {i}: {np.sum(train_concept_labels[0] - train_concept_labels[i] > threshold)}\")\n",
    "        difference_in_class += np.sum(train_concept_labels[0] - train_concept_labels[i] > threshold)\n",
    "\n",
    "print(\"Class 0:\", difference_in_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "642bc564-645e-4769-a3b9-3e298a67dabe",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"---TEST---\")\n",
    "threshold = 0.9\n",
    "\n",
    "# Bucket test-instance indices by class id.\n",
    "indices_by_label = {}\n",
    "for idx, label in enumerate(Y_test):\n",
    "    indices_by_label.setdefault(label, []).append(idx)\n",
    "\n",
    "# For every within-class pair, count concept entries whose absolute\n",
    "# difference exceeds the threshold (concepts that disagree).\n",
    "diff_concepts_by_label = {}\n",
    "for label, members in indices_by_label.items():\n",
    "    disagreements = 0\n",
    "    for a in range(len(members)):\n",
    "        for b in range(a + 1, len(members)):\n",
    "            delta = np.abs(test_concept_labels[members[a]] - test_concept_labels[members[b]])\n",
    "            disagreements += np.sum(delta > threshold)\n",
    "    diff_concepts_by_label[label] = disagreements\n",
    "\n",
    "total_diff_concepts = sum(diff_concepts_by_label.values())\n",
    "print(f\"There are {total_diff_concepts} different concepts in total.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "20caff93",
   "metadata": {},
   "source": [
    "## 5. Create Train and Test Datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "f5e27658",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8333333333333334\n",
      "0.8417328270624784\n"
     ]
    }
   ],
   "source": [
    "# Prevalence of concept column 50 in each split after filtering.\n",
    "# NOTE(review): execution counts here are out of order -- restart and run all.\n",
    "print(train_concept_labels[:, 50].mean())\n",
    "print(test_concept_labels[:, 50].mean())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "81cfcf3d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dataset initialized with 5994 pre-sorted items.\n",
      "Train dataset length: 5994\n",
      "Dataset initialized with 5794 pre-sorted items.\n",
      "Test dataset length: 5794\n"
     ]
    }
   ],
   "source": [
    "# Wrap each split in an ImageConceptDataset so DataLoaders can serve aligned\n",
    "# (image tensor, concept vector, class label, image id) items.\n",
    "train_dataset = ImageConceptDataset(\n",
    "    image_tensors=train_tensors,\n",
    "    concept_labels=train_concept_labels,\n",
    "    image_labels=train_img_labels\n",
    ")\n",
    "print(f\"Train dataset length: {len(train_dataset)}\")\n",
    "\n",
    "test_dataset = ImageConceptDataset(\n",
    "    image_tensors=test_tensors,\n",
    "    concept_labels=test_concept_labels,\n",
    "    image_labels=test_img_labels\n",
    ")\n",
    "print(f\"Test dataset length: {len(test_dataset)}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0eb500ea",
   "metadata": {},
   "source": [
    "**Test `__getitem__`**\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "5c1fab1b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Item at index 10:\n",
      "\tImage Tensor Shape: torch.Size([3, 299, 299])\n",
      "\tConcept Labels Shape: torch.Size([112])\n",
      "\tImage Label Shape: torch.Size([200])\n",
      "\n",
      "\tConcept vector (first 10): [0. 1. 0. 0. 0. 0. 0. 1. 0. 0.]\n",
      "\tHas 21.0 true concepts\n",
      "\tHas concepts: ['has_bill_shape::dagger', 'has_bill_shape::cone', 'has_wing_color::rufous', 'has_wing_color::red', 'has_upperparts_color::rufous', 'has_upperparts_color::orange', 'has_underparts_color::brown', 'has_underparts_color::green', 'has_underparts_color::black', 'has_underparts_color::white', 'has_breast_pattern::spotted', 'has_back_color::purple', 'has_back_color::olive', 'has_upper_tail_color::blue', 'has_upper_tail_color::iridescent', 'has_upper_tail_color::rufous', 'has_upper_tail_color::olive', 'has_upper_tail_color::buff', 'has_head_pattern::eyebrow', 'has_head_pattern::capped', 'has_breast_color::purple']\n",
      "\n",
      "\tImage Class: 1\n"
     ]
    }
   ],
   "source": [
    "# Sanity-check ImageConceptDataset.__getitem__ on one training item.\n",
    "from src.preprocessing.CUB.data_encoding import get_concepts\n",
    "\n",
    "\n",
    "concept_names_path = os.path.join(PROJECT_ROOT, 'data', 'CUB', 'concepts.txt')\n",
    "# NOTE(review): images_file is defined in the image-loading cell near the top;\n",
    "# this cell breaks if run before it. image_id_mapping is only referenced by the\n",
    "# commented-out lines below.\n",
    "image_id_mapping = get_filename_to_id_mapping(images_file, reverse=True)\n",
    "\n",
    "item_index = 10\n",
    "if item_index < len(train_dataset):\n",
    "    img_tensor, concepts, img_label, img_id = train_dataset[item_index]\n",
    "    print(f\"Item at index {item_index}:\")\n",
    "    print(f\"\\tImage Tensor Shape: {img_tensor.shape}\")\n",
    "    print(f\"\\tConcept Labels Shape: {concepts.shape}\")\n",
    "    print(f\"\\tImage Label Shape: {img_label.shape}\\n\")\n",
    "\n",
    "    # print(f\"\\tImage ID: {img_id}\")\n",
    "    # print(f\"\\tFilename (lookup): {image_id_mapping.get(img_id)}\\n\")\n",
    "\n",
    "    print(f\"\\tConcept vector (first 10): {concepts[:10].numpy()}\")\n",
    "    print(f\"\\tHas {concepts.numpy().sum()} true concepts\")\n",
    "    print(f\"\\tHas concepts: {get_concepts(concepts.numpy(), concept_names_path)}\\n\")\n",
    "\n",
    "    # +1 because CUB class ids are 1-indexed while argmax is 0-indexed\n",
    "    print(f\"\\tImage Class: {np.argmax(img_label.numpy())+1}\")\n",
    "else:\n",
    "    print(f\"Index {item_index} is out of bounds.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "df14b8fb",
   "metadata": {},
   "source": [
    "## 6. Create Train and Test DataLoaders\n",
    "These allow us to generate batches of data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bd11eb18",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 64\n",
    "# Shared settings: pin_memory speeds up host-to-GPU transfer; keep the partial final batch.\n",
    "loader_kwargs = dict(batch_size=batch_size, num_workers=4, pin_memory=True, drop_last=False)\n",
    "\n",
    "# Shuffle training batches every epoch ...\n",
    "train_loader = DataLoader(train_dataset, shuffle=True, **loader_kwargs)\n",
    "print(f\"Train DataLoader created with batch size {batch_size}.\")\n",
    "\n",
    "# ... but never shuffle evaluation data, so outputs stay in dataset order.\n",
    "test_loader = DataLoader(test_dataset, shuffle=False, **loader_kwargs)\n",
    "print(f\"Test DataLoader created with batch size {batch_size}.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4a28d5cf",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "de7d050f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
