{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "36c88272-9801-4b54-ae80-5dd319fb26e7",
   "metadata": {},
   "source": [
     "# Creating the splits for the dataset with synthetic data\n",
    "### The validation only contains real data and the training data cannot contain the same cases as in the validation (even with the synthetic tumour)\n",
    "Each split was created to follow the same split from when only real data was used for training. \n",
    "In case you want to use this notebook, you need to first create the 5 folds with only real data.\n",
    "Check the example/splits_final* file content."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf992a7a-e2b4-49ab-b6f0-458b96c24ebb",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import os\n",
    "from os import listdir\n",
    "from os.path import join\n",
    "\n",
    "def create_json(Dataset_name_real, Dataset_name_fake):\n",
    "    # Specify the path to the JSON file\n",
    "    Dataset_splits_json = f\"./nnUNet_preprocessed/{Dataset_name_real}/splits_final.json\"\n",
    "\n",
    "    # Open the file and load its contents\n",
    "    with open(Dataset_splits_json, 'r') as file:\n",
    "        Dataset_splits = json.load(file)\n",
    "        \n",
    "    all_folds = []\n",
    "    for fold in range(5):    \n",
    "        # Get the validation splits for this fold\n",
    "        Dataset_splits_val = Dataset_splits[fold]['val']\n",
    "\n",
    "        # Create a list with all the cases of the synthetic dataset\n",
    "        synthetic_dataset_L = []\n",
    "        for case in listdir(f\"./nnUNet_raw/{Dataset_name_fake}/labelsTr\"):\n",
    "            case_name = case.split(\".nii.gz\")[0]\n",
    "            synthetic_dataset_L.append(case_name)\n",
    "        print(f\"len(synthetic_dataset_L): {len(synthetic_dataset_L)}\")\n",
    "\n",
    "        # Remove the validation cases from the entire list of cases\n",
    "        for case in Dataset_splits_val:\n",
    "            synthetic_dataset_L = [s for s in synthetic_dataset_L if case not in s]\n",
    "        fake_train_L = synthetic_dataset_L\n",
    "        print(f\"len(fake_train_L): {len(fake_train_L)}\")\n",
    "\n",
    "        # create the split dict for this fold\n",
    "        this_fold = {\n",
    "            \"train\": fake_train_L,\n",
    "            \"val\": Dataset_splits_val\n",
    "        }\n",
    "\n",
    "        all_folds.append(this_fold)\n",
    "\n",
    "    # Save the new data split \n",
    "    with open(Dataset_splits_json, 'w') as file:\n",
    "        json.dump(all_folds, file, indent=4)\n",
    "\n",
    "    print(f\"In something prints after this, something is wrong with the dataset\")\n",
    "    # double check if everything if correct\n",
    "    # Open the file and load its contents\n",
    "    with open(Dataset_splits_json, 'r') as file:\n",
    "        Dataset_splits = json.load(file)\n",
    "        \n",
    "    for split in Dataset_splits:\n",
    "        for val_case in split[\"val\"]:\n",
    "            for train_case in split[\"train\"]:\n",
    "                if val_case in train_case:\n",
    "                    print(val_case)\n",
    "\n",
    "Dataset_name_real = \"Dataset231_BraTS_2023\"\n",
    "Dataset_name_fake = \"Dataset232_BraTS_2023_rGANs\"\n",
    "create_json(Dataset_name_real=Dataset_name_real, Dataset_name_fake=Dataset_name_fake)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fd88be6c",
   "metadata": {},
   "source": [
    "## For the MedNeXt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f7b0dcba",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle\n",
    "import json\n",
    "import os\n",
    "from os import listdir\n",
    "from os.path import join\n",
    "import numpy as np\n",
    "from collections import OrderedDict\n",
    "\n",
    "\n",
    "def create_json(Dataset_name_real, Dataset_name_fake):\n",
    "    # Path to your .pkl file\n",
    "    Dataset_splits_pkl = f'./nnUNet_preprocessed/{Dataset_name_real}/splits_final.pkl'\n",
    "\n",
    "    # Open the file in binary read mode\n",
    "    with open(Dataset_splits_pkl, 'rb') as file:\n",
    "        Dataset_splits = pickle.load(file)\n",
    "\n",
    "    all_folds = []\n",
    "        \n",
    "    for fold in range(5):  \n",
    "        Dataset_splits_val = Dataset_splits[fold]['val']\n",
    "\n",
    "        # Create a list with all the cases of the synthetic dataset\n",
    "        synthetic_dataset_L = []\n",
    "        for case in listdir(\"./nnUNet_preprocessed/{Dataset_name_fake}/gt_segmentations\"):\n",
    "            case_name = case.split(\".nii.gz\")[0]\n",
    "            synthetic_dataset_L.append(case_name)\n",
    "        print(f\"len(synthetic_dataset_L): {len(synthetic_dataset_L)}\")\n",
    "\n",
    "        # Remove the validation cases from the entire list of cases\n",
    "        for case in Dataset_splits_val:\n",
    "            synthetic_dataset_L = [s for s in synthetic_dataset_L if case not in s]\n",
    "        fake_train_L = synthetic_dataset_L\n",
    "        print(f\"len(fake_train_L): {len(fake_train_L)}\")\n",
    "\n",
    "        # create the split dict for this fold\n",
    "        train_array = np.array(fake_train_L, dtype='<U19')\n",
    "        val_array = np.array(Dataset_splits_val, dtype='<U19')\n",
    "        \n",
    "        this_fold = OrderedDict([(\"train\", train_array), (\"val\", val_array)])\n",
    "\n",
    "        all_folds.append(this_fold)\n",
    "\n",
    "\n",
    "Dataset_name_real = \"Task231_BraTS_2023\"\n",
    "Dataset_name_fake = \"Task232_BraTS_2023_rGANs\"\n",
    "create_json(Dataset_name_real=Dataset_name_real, Dataset_name_fake=Dataset_name_fake)\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python (wdm)",
   "language": "python",
   "name": "wdm"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
