{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 51,
   "id": "b2033a13",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standard library\n",
    "import ast\n",
    "import json\n",
    "import os\n",
    "import random  # used by the parsing cells below; was missing, causing NameError on a fresh kernel\n",
    "\n",
    "# Third-party\n",
    "import numpy as np\n",
    "import pandas as pd\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "38da2645",
   "metadata": {},
   "outputs": [],
   "source": [
    "# data = pd.read_parquet('roi_data_similarmerged_facecorrected_tracesfixed_textpadded_asrcorrected_final_clustersadded_medicalcontentdetected_rerun_uniqueidentifieradded.parquet')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c00570bb",
   "metadata": {},
   "source": [
    "# Let's start with conversations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "7f607d82",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the CONVERSATION split of the instruction-tuning metadata\n",
    "data = pd.read_parquet('roi_data_similarmerged_facecorrected_tracesfixed_textpadded_asrcorrected_final_clustersadded_medicalcontentdetected_rerun_uniqueidentifieradded_CONVERSATION.parquet')\n",
    "# and let's get a small chunk\n",
    "data = data[0:20]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "aca755fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "import random  # random.choice is used below and may not be imported by the top import cell\n",
    "\n",
    "def parse_conversation(text):\n",
    "    \"\"\"Parse a 'User: ...'/'GPT: ...' transcript into LLaVA-style turns.\n",
    "\n",
    "    Returns a list of {'from': 'human'|'gpt', 'value': str} dicts; the\n",
    "    first human turn gets an '<image>' token attached at a random end.\n",
    "    \"\"\"\n",
    "    # Turns are separated by blank lines in the source .txt files\n",
    "    turns = text.split(\"\\n\\n\")\n",
    "\n",
    "    structured_conversation = []\n",
    "\n",
    "    for turn in turns:\n",
    "        # Split off the speaker label on the first ': ' only, so colons\n",
    "        # inside the utterance are preserved\n",
    "        speaker, speech = turn.split(\": \", 1)\n",
    "\n",
    "        # Tag the very first human turn with the image token, randomly\n",
    "        # prepended or appended (mirrors LLaVA's convention)\n",
    "        if speaker == \"User\" and len(structured_conversation) == 0:\n",
    "            if random.choice([True, False]):\n",
    "                speech = \"<image>\\n\" + speech\n",
    "            else:\n",
    "                speech = speech + \"\\n<image>\"\n",
    "\n",
    "        # Map 'User' -> 'human'; any other speaker (i.e. 'GPT') -> 'gpt'\n",
    "        from_whom = \"human\" if speaker == \"User\" else \"gpt\"\n",
    "\n",
    "        structured_conversation.append({\n",
    "            \"from\": from_whom,\n",
    "            \"value\": speech\n",
    "        })\n",
    "\n",
    "    return structured_conversation\n",
    "\n",
    "\n",
    "json_list = []\n",
    "conversation_dir = 'quilt_instruction_tuning_dataset/conversation/'\n",
    "\n",
    "for index, row in data.iterrows():\n",
    "    # One transcript file per row, keyed by the unique row id\n",
    "    file_path = os.path.join(conversation_dir, f\"{row['unique_row_id']}_conversation.txt\")\n",
    "\n",
    "    try:\n",
    "        with open(file_path, 'r') as file:\n",
    "            conversation_text = file.read()\n",
    "    except FileNotFoundError:\n",
    "        print(f\"File not found: {file_path}\")\n",
    "        continue\n",
    "\n",
    "    conversations = parse_conversation(conversation_text)\n",
    "\n",
    "    record = {\n",
    "        \"id\": row['unique_row_id'],\n",
    "        \"image\": row['image_path'].item(0),  # assumes 'image_path' is array-like; takes its first element -- TODO confirm\n",
    "        \"conversations\": conversations\n",
    "    }\n",
    "\n",
    "    json_list.append(record)\n",
    "\n",
    "# Write the per-task dataset to disk\n",
    "with open('trial_conversations.json', 'w') as json_file:\n",
    "    json.dump(json_list, json_file, indent=4)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b988f5cc",
   "metadata": {},
   "source": [
    "# Now Complex Reasoning:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "2cf6a368",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the abduction/complex-reasoning table; cap at 500 rows for this run\n",
    "data = pd.read_parquet('iterative_abduction_and_complex_reasoning.parquet')\n",
    "data = data[0:500]\n",
    "# data = data[0:20]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "5eb4b80e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "File not found: quilt_instruction_tuning_dataset/complex_reasoning/lWTeEfSE2oY_187_complex_reasoning.txt\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "import json\n",
    "import random\n",
    "\n",
    "\n",
    "def parse_single_interaction(text):\n",
    "    \"\"\"Turn one 'User: ... / GPT: ...' exchange into a two-entry\n",
    "    LLaVA-style conversation list, tagging the question with '<image>'.\"\"\"\n",
    "    # Paragraphs are delimited by blank lines; the first is the user turn\n",
    "    parts = text.split(\"\\n\\n\")\n",
    "\n",
    "    # Strip the 'User: ' prefix (split only on the first ': ')\n",
    "    question = parts[0].split(\": \", 1)[1]\n",
    "\n",
    "    # Attach the image token at a random end, as LLaVA does\n",
    "    if random.choice([True, False]):\n",
    "        question = \"<image>\\n\" + question\n",
    "    else:\n",
    "        question = question + \"\\n<image>\"\n",
    "\n",
    "    # Everything after the first paragraph belongs to GPT; re-join any\n",
    "    # multi-paragraph answer before stripping the 'GPT: ' prefix\n",
    "    answer = \"\\n\\n\".join(parts[1:]).split(\": \", 1)[1]\n",
    "\n",
    "    return [\n",
    "        {\"from\": \"human\", \"value\": question},\n",
    "        {\"from\": \"gpt\", \"value\": answer},\n",
    "    ]\n",
    "\n",
    "\n",
    "json_list = []\n",
    "conversation_dir = 'quilt_instruction_tuning_dataset/complex_reasoning/'\n",
    "\n",
    "for index, row in data.iterrows():\n",
    "    # One reasoning file per row, keyed by the unique row id\n",
    "    file_path = os.path.join(conversation_dir, f\"{row['unique_row_id']}_complex_reasoning.txt\")\n",
    "\n",
    "    try:\n",
    "        with open(file_path, 'r') as file:\n",
    "            conversation_text = file.read()\n",
    "    except FileNotFoundError:\n",
    "        print(f\"File not found: {file_path}\")\n",
    "        continue\n",
    "\n",
    "    conversations = parse_single_interaction(conversation_text)\n",
    "\n",
    "    record = {\n",
    "        \"id\": row['unique_row_id'],\n",
    "        \"image\": row['image_path'].item(0),  # presumably array-like of paths; first entry taken -- confirm\n",
    "        \"conversations\": conversations\n",
    "    }\n",
    "\n",
    "    json_list.append(record)\n",
    "\n",
    "# Serialize and write out\n",
    "json_data = json.dumps(json_list, indent=4)\n",
    "\n",
    "with open('trial_complex_reasoning.json', 'w') as json_file:\n",
    "    json_file.write(json_data)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1275afcc",
   "metadata": {},
   "source": [
    "# Detailed Description"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "2637d010",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the DETAILED_DESCRIPTION split; keep the first 200 rows for this trial\n",
    "data = pd.read_parquet('roi_data_similarmerged_facecorrected_tracesfixed_textpadded_asrcorrected_final_clustersadded_medicalcontentdetected_rerun_uniqueidentifieradded_DETAILED_DESCRIPTION.parquet')  \n",
    "data = data[0:200]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "2ec1a0a4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "File not found: quilt_instruction_tuning_dataset/detailed_description/lPuADeCTsqo_74482_detailed_description.txt\n"
     ]
    }
   ],
   "source": [
    "# A pool of pre-written prompts; one is sampled per image\n",
    "detailed_description_questions = [\n",
    "    \"Can you provide a concise description of the histopathology image shown?\",\n",
    "    \"How would you summarize the features of the histopathology image displayed?\",\n",
    "    \"What are the key observations in the presented histopathology image?\",\n",
    "    \"Briefly outline the characteristics of the histopathology image in view.\",\n",
    "    \"In a few words, what does the histopathology image depict?\",\n",
    "    \"What essential information emerges from the histopathology image provided?\",\n",
    "    \"Could you distill the contents of the displayed histopathology image into a brief description?\",\n",
    "    \"Offer a succinct report on the histopathological aspects evident in the image.\",\n",
    "    \"Identify and briefly discuss the components of the histopathology image.\",\n",
    "    \"How would you encapsulate the findings presented in the histopathology image?\",\n",
    "    \"What insights can be concisely derived from the histopathology image on display?\",\n",
    "]\n",
    "\n",
    "\n",
    "def create_single_interaction(gpt_response):\n",
    "    \"\"\"Pair a randomly sampled description prompt (tagged with '<image>')\n",
    "    with the given description text, in LLaVA conversation format.\"\"\"\n",
    "    question = random.choice(detailed_description_questions)\n",
    "\n",
    "    # Place the image token at a random end, mirroring LLaVA\n",
    "    if random.choice([True, False]):\n",
    "        question = \"<image>\\n\" + question\n",
    "    else:\n",
    "        question = question + \"\\n<image>\"\n",
    "\n",
    "    return [\n",
    "        {\"from\": \"human\", \"value\": question},\n",
    "        {\"from\": \"gpt\", \"value\": gpt_response},\n",
    "    ]\n",
    "\n",
    "\n",
    "json_list = []\n",
    "description_dir = 'quilt_instruction_tuning_dataset/detailed_description/'\n",
    "\n",
    "for index, row in data.iterrows():\n",
    "    # One description file per row, keyed by the unique row id\n",
    "    file_path = os.path.join(description_dir, f\"{row['unique_row_id']}_detailed_description.txt\")\n",
    "\n",
    "    try:\n",
    "        with open(file_path, 'r') as file:\n",
    "            description_text = file.read().strip()  # drop leading/trailing whitespace\n",
    "    except FileNotFoundError:\n",
    "        print(f\"File not found: {file_path}\")\n",
    "        continue\n",
    "\n",
    "    interactions = create_single_interaction(description_text)\n",
    "\n",
    "    record = {\n",
    "        \"id\": row['unique_row_id'],\n",
    "        \"image\": row['image_path'].item(0),  # presumably array-like of paths; first entry taken -- confirm\n",
    "        \"conversations\": interactions\n",
    "    }\n",
    "\n",
    "    json_list.append(record)\n",
    "\n",
    "# Serialize and write out\n",
    "json_data = json.dumps(json_list, indent=4)\n",
    "\n",
    "with open('trial_detailed_description.json', 'w') as json_file:\n",
    "    json_file.write(json_data)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "184d52c3",
   "metadata": {},
   "source": [
    "# Iterative Abduction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "351684e0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload the abduction table; only 50 rows for the iterative-abduction trial\n",
    "data = pd.read_parquet('iterative_abduction_and_complex_reasoning.parquet')\n",
    "data = data[0:50]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "9b6eb122",
   "metadata": {},
   "outputs": [],
   "source": [
    "def format_for_qa(simplified_text, image_identifier):\n",
    "    \"\"\"Convert alternating 'User:'/'GPT:' lines into role/content dicts.\n",
    "\n",
    "    The first user line gets `image_identifier` attached at a random end.\n",
    "    Lines are consumed in (User, GPT) pairs; a trailing unpaired line is\n",
    "    now dropped instead of raising IndexError.\n",
    "    \"\"\"\n",
    "    segments = simplified_text.split('\\n')\n",
    "    structured_qa = []\n",
    "    first_user_segment = True  # only the first User turn carries the image token\n",
    "\n",
    "    for i in range(0, len(segments), 2):\n",
    "        if i + 1 >= len(segments):\n",
    "            break  # odd segment count: last User line has no GPT reply\n",
    "        user_segment = segments[i].replace('User: ', '').strip()\n",
    "        gpt_segment = segments[i + 1].replace('GPT: ', '').strip()\n",
    "\n",
    "        if first_user_segment:\n",
    "            # Randomly prepend or append the image token, like LLaVA\n",
    "            if random.choice([True, False]):\n",
    "                user_segment = f\"{image_identifier}\\n{user_segment}\"\n",
    "            else:\n",
    "                user_segment = f\"{user_segment}\\n{image_identifier}\"\n",
    "            first_user_segment = False\n",
    "\n",
    "        # NOTE(review): this cell emits 'role'/'content' keys while the other\n",
    "        # cells emit 'from'/'value' -- confirm downstream expects this format\n",
    "        structured_qa.append({\n",
    "            'role': 'User',\n",
    "            'content': user_segment\n",
    "        })\n",
    "\n",
    "        structured_qa.append({\n",
    "            'role': 'GPT',\n",
    "            'content': gpt_segment\n",
    "        })\n",
    "\n",
    "    return structured_qa\n",
    "\n",
    "\n",
    "json_list = []\n",
    "abduction_dir = 'quilt_instruction_tuning_dataset/iterative_abduction/'\n",
    "\n",
    "for index, row in data.iterrows():\n",
    "    # One abduction transcript per row, keyed by the unique row id\n",
    "    file_path = os.path.join(abduction_dir, f\"{row['unique_row_id']}_iterative_abduction.txt\")\n",
    "\n",
    "    try:\n",
    "        with open(file_path, 'r') as file:\n",
    "            conversation_text = file.read()\n",
    "    except FileNotFoundError:\n",
    "        print(f\"File not found: {file_path}\")\n",
    "        continue\n",
    "\n",
    "    # FIXME: simplify_conversation is not defined anywhere in this notebook;\n",
    "    # this cell raises NameError on a fresh kernel. Define or import it.\n",
    "    simplified_text = simplify_conversation(conversation_text)\n",
    "\n",
    "    image_identifier = \"<image>\"  # token marking where the image goes\n",
    "    conversations = format_for_qa(simplified_text, image_identifier)\n",
    "\n",
    "    record = {\n",
    "        \"id\": row['unique_row_id'],\n",
    "        \"image\": row['image_path'].item(0),  # assumes 'image_path' is array-like; takes its first element -- TODO confirm\n",
    "        \"conversations\": conversations\n",
    "    }\n",
    "\n",
    "    json_list.append(record)\n",
    "\n",
    "# Serialize and write out\n",
    "json_data = json.dumps(json_list, indent=4)\n",
    "\n",
    "with open('trial_iterative_abductions.json', 'w') as json_file:\n",
    "    json_file.write(json_data)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c5f4beff",
   "metadata": {},
   "source": [
    "# Merge it all and save"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "81c66b2c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The data has been merged and saved to quilt_instruct.json.\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "import os\n",
    "import random  # random.shuffle is used below; import locally so this cell is self-contained\n",
    "\n",
    "# The per-task JSON files to merge into the final dataset\n",
    "json_files = [\n",
    "    'trial_complex_reasoning.json',\n",
    "    'trial_conversations.json',\n",
    "    'trial_detailed_description.json',\n",
    "    'trial_iterative_abductions.json'\n",
    "]\n",
    "\n",
    "# Accumulates every record from all input files\n",
    "merged_data = []\n",
    "\n",
    "for json_file in json_files:\n",
    "    if os.path.exists(json_file):\n",
    "        with open(json_file, 'r') as file:\n",
    "            # Each file is expected to contain a list of records.\n",
    "            # Use a distinct name so the notebook-level DataFrame 'data'\n",
    "            # is not clobbered by this cell.\n",
    "            file_records = json.load(file)\n",
    "            merged_data.extend(file_records)\n",
    "    else:\n",
    "        print(f\"Warning: {json_file} does not exist and will be skipped.\")\n",
    "\n",
    "# Shuffle so training does not see the tasks in contiguous blocks.\n",
    "# NOTE(review): no seed is set, so the order is not reproducible across runs.\n",
    "random.shuffle(merged_data)\n",
    "\n",
    "output_file = 'quilt_instruct.json'\n",
    "with open(output_file, 'w') as file:\n",
    "    json.dump(merged_data, file, indent=4)\n",
    "\n",
    "print(f\"The data has been merged and saved to {output_file}.\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "86f4d679",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "768"
      ]
     },
     "execution_count": 66,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Total number of merged records across all four task files\n",
    "len(merged_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "8b726430",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: There are duplicates for the following IDs: ['hZG0anMugpE_50', 'x7Ubz-ZcZr0_2', 'QEnRx-H2l90_12', 'x7Ubz-ZcZr0_3', 'fl_MV-pnMC8_18', 'x7Ubz-ZcZr0_3', 'c70DOMvXQkU_19', 'chx1gb1LuIc_88', 'hZG0anMugpE_45', 'QEnRx-H2l90_11', 'fl_MV-pnMC8_16', 'hZG0anMugpE_41', 'JKE0vWqKbc8_8', 'hZG0anMugpE_46', 'x7Ubz-ZcZr0_1', 'chx1gb1LuIc_87', 'QEnRx-H2l90_13', 'x7Ubz-ZcZr0_0', 'hZG0anMugpE_59', 'hZG0anMugpE_42', 'hZG0anMugpE_52', 'i3nfBWinQaE_379', 'JKE0vWqKbc8_7', 'U1RnDnPWlBA_15', 'hZG0anMugpE_47', 'JKE0vWqKbc8_6', 'hZG0anMugpE_56', 'QEnRx-H2l90_11', 'JKE0vWqKbc8_7', 'c70DOMvXQkU_20', 'hZG0anMugpE_57', 'QEnRx-H2l90_10', 'c70DOMvXQkU_20', 'hZG0anMugpE_53', 'U1RnDnPWlBA_14', 'chx1gb1LuIc_86', 'hZG0anMugpE_55', 'chx1gb1LuIc_90', 'hZG0anMugpE_40', 'JKE0vWqKbc8_8', 'chx1gb1LuIc_85', 'x7Ubz-ZcZr0_0', 'chx1gb1LuIc_82', 'JKE0vWqKbc8_9', 'QEnRx-H2l90_12', 'fl_MV-pnMC8_17', 'hZG0anMugpE_54', 'hZG0anMugpE_44', 'JKE0vWqKbc8_9', 'chx1gb1LuIc_89', 'chx1gb1LuIc_92', 'fl_MV-pnMC8_16', 'x7Ubz-ZcZr0_2', 'chx1gb1LuIc_83', 'hZG0anMugpE_58', 'pzh2I_ZuezQ_487', 'c70DOMvXQkU_19', 'hZG0anMugpE_51', 'chx1gb1LuIc_91', 'x7Ubz-ZcZr0_1', 'hZG0anMugpE_49', 'hZG0anMugpE_48', 'QEnRx-H2l90_13', 'chx1gb1LuIc_84', 'U1RnDnPWlBA_15', 'hZG0anMugpE_43']\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "\n",
    "# Verify that no record id appears more than once in the merged dataset\n",
    "input_file = 'quilt_instruct.json'\n",
    "\n",
    "seen = set()        # ids encountered so far\n",
    "duplicate_ids = []  # every repeated occurrence, in encounter order\n",
    "\n",
    "try:\n",
    "    with open(input_file, 'r') as file:\n",
    "        records = json.load(file)\n",
    "\n",
    "    for entry in records:\n",
    "        entry_id = entry.get('id')\n",
    "        if entry_id in seen:\n",
    "            duplicate_ids.append(entry_id)  # repeat -> report it\n",
    "        else:\n",
    "            seen.add(entry_id)\n",
    "\n",
    "    if duplicate_ids:\n",
    "        print(f\"Warning: There are duplicates for the following IDs: {duplicate_ids}\")\n",
    "    else:\n",
    "        print(\"Success: All IDs are unique.\")\n",
    "\n",
    "except FileNotFoundError:\n",
    "    print(f\"Error: {input_file} not found.\")\n",
    "except json.JSONDecodeError:\n",
    "    print(f\"Error: {input_file} is not a valid JSON file.\")\n",
    "except Exception as e:\n",
    "    print(f\"An error occurred: {e}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a108b978",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pathvid_pretrain",
   "language": "python",
   "name": "pathvid_pretrain"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
