import pandas as pd
import numpy as np
from tqdm import tqdm

# FIX: seed NumPy's RNG so the family-wise split below is reproducible
# (the original shuffled families with no seed, so every run produced a
# different train/test partition).
np.random.seed(42)

# Load the tab-separated UniProt export.
file_path = 'binding_sites_uniprot_16M.tsv'
data = pd.read_csv(file_path, sep='\t')

# Drop rows with no family annotation; the split below is family-wise.
data = data[pd.notna(data['Protein families'])]

# Size of each family, used only to impose a stable sort order.
family_sizes = data.groupby('Protein families').size()
data['Family size'] = data['Protein families'].map(family_sizes)

# Sort by family size (descending), then family name (ascending).
data_sorted = data.sort_values(by=['Family size', 'Protein families'], ascending=[False, True])

# FIX: avoid inplace=True (hidden-state hazard on notebook re-runs, no
# performance benefit); reassign instead.
data_sorted = data_sorted.drop(columns='Family size')

def extract_location(site_info):
    """Extract residue locations from a UniProt feature string.

    The raw column looks like 'BINDING 298; /ligand=...; BINDING 326; ...'.
    Returns a '; '-joined string of the tokens that follow a BINDING or
    ACT_SITE keyword, or None when the input is missing.
    """
    if pd.isnull(site_info):
        return None
    locations = []
    for info in site_info.split(';'):
        if 'BINDING' in info or 'ACT_SITE' in info:
            parts = info.split()
            # FIX: guard against segments that mention the keyword but carry
            # no location token (the original `info.split()[1]` raised
            # IndexError on such segments).
            if len(parts) > 1:
                locations.append(parts[1])
    return '; '.join(locations)

# Reduce both feature-string columns to bare residue locations.
data_sorted['Binding site'] = data_sorted['Binding site'].apply(extract_location)
data_sorted['Active site'] = data_sorted['Active site'].apply(extract_location)

# Combine the two site columns; a missing value stringifies to 'nan'.
data_sorted['Binding-Active site'] = data_sorted['Binding site'].astype(str) + '; ' + data_sorted['Active site'].astype(str)

# A row where BOTH columns were missing becomes the literal 'nan; nan' -> None.
data_sorted['Binding-Active site'] = data_sorted['Binding-Active site'].replace('nan; nan', None)

# Entries containing '<' or '>' encode open-ended positions (e.g. '<1');
# count them, report, and drop them.
entries_with_angle_brackets = data_sorted['Binding-Active site'].str.contains('<|>', na=False)
num_entries_with_angle_brackets = entries_with_angle_brackets.sum()
print(f"Number of entries with angle brackets: {num_entries_with_angle_brackets}")

data_filtered = data_sorted[~entries_with_angle_brackets]
num_remaining_rows = data_filtered.shape[0]
print(f"Number of remaining rows: {num_remaining_rows}")

num_distinct_families = data_filtered['Protein families'].nunique()
print(f"Number of distinct protein families: {num_distinct_families}")

# Target ~20% of rows for the test set, chosen whole-family at a time so
# that no protein family is shared between the two splits.
target_test_rows = int(0.20 * num_remaining_rows)

unique_families = data_filtered['Protein families'].unique()
np.random.shuffle(unique_families)

# Group once so each family's row indices can be fetched cheaply in the loop.
grouped_data = data_filtered.groupby('Protein families')

test_rows = []
current_test_rows = 0
threshold_crossed = False

for family in tqdm(unique_families, unit="family"):
    family_rows = grouped_data.get_group(family).index.tolist()
    # FIX: the original condition also tested `previous_family == family`,
    # which can never be true because `unique_families` contains no
    # duplicates; the dead condition and its bookkeeping are removed.
    if not threshold_crossed:
        test_rows.extend(family_rows)
        current_test_rows += len(family_rows)
    if current_test_rows >= target_test_rows:
        threshold_crossed = True

# Everything not selected for the test set goes to the train set.
train_rows = set(data_filtered.index) - set(test_rows)

test_df = data_filtered.loc[list(test_rows)]
train_df = data_filtered.loc[list(train_rows)]

num_test_families = test_df['Protein families'].nunique()
num_train_families = train_df['Protein families'].nunique()
print(f"Number of distinct protein families in the test set: {num_test_families}")
print(f"Number of distinct protein families in the train set: {num_train_families}")
percentage = num_test_families/(num_test_families+num_train_families)
print(f"Percentage of families in test set: {percentage}")

test_df.shape[0], train_df.shape[0]

# ---- next cell: verify the family-wise split is truly disjoint ----
unique_test_families = set(test_df['Protein families'].unique())
unique_train_families = set(train_df['Protein families'].unique())

common_families = unique_test_families.intersection(unique_train_families)

print(f"Number of common families: {len(common_families)}")
if len(common_families) > 0:
    print(f"Common families: {common_families}")
else:
    print("No common families between test and train datasets.")
import re

# Rows whose site string contains '?' carry uncertain positions; locate them.
# FIX: use raw strings for the regex — '\?' inside a plain string literal is
# an invalid escape sequence (SyntaxWarning/DeprecationWarning on modern
# Python); the matched pattern is unchanged.
test_rows_with_question_mark = test_df[test_df['Binding-Active site'].str.contains(r'\?', na=False, regex=True)]
train_rows_with_question_mark = train_df[train_df['Binding-Active site'].str.contains(r'\?', na=False, regex=True)]

# Report how many uncertain rows each split contains.
num_test_rows_with_question_mark = len(test_rows_with_question_mark)
num_train_rows_with_question_mark = len(train_rows_with_question_mark)

print(f"Number of test rows with question mark: {num_test_rows_with_question_mark}")
print(f"Number of train rows with question mark: {num_train_rows_with_question_mark}")

# Drop the uncertain rows from both splits.
test_df = test_df.drop(test_rows_with_question_mark.index)
train_df = train_df.drop(train_rows_with_question_mark.index)

# Sanity check: remaining row counts after the drop.
remaining_test_rows = test_df.shape[0]
remaining_train_rows = train_df.shape[0]

print(f"Number of remaining test rows: {remaining_test_rows}")
print(f"Number of remaining train rows: {remaining_train_rows}")

def expand_ranges(s):
    """Expand 'a..b' position ranges into an explicit comma-separated run.

    e.g. '402..404' -> '402, 403, 404'. Text outside a range is left
    untouched. The input is stringified first, so a None value becomes the
    string 'None' (harmless: downstream parsing keeps only digit tokens).
    """
    return re.sub(r'(\d+)\.\.(\d+)', lambda m: ', '.join(map(str, range(int(m.group(1)), int(m.group(2))+1))), str(s))

# Expand ranges in the site column of both splits.
test_df['Binding-Active site'] = test_df['Binding-Active site'].apply(expand_ranges)
train_df['Binding-Active site'] = train_df['Binding-Active site'].apply(expand_ranges)
def convert_to_binary_list(binding_active_str, sequence_len):
    """Turn a site-location string into a per-residue 0/1 mask.

    `binding_active_str` holds ';'- and ','-delimited 1-based positions,
    possibly interleaved with non-numeric tokens (e.g. 'None'), which are
    skipped. Returns a list of length `sequence_len` with 1 at every listed
    position that falls inside the sequence, 0 elsewhere.
    """
    mask = [0] * sequence_len
    if pd.notna(binding_active_str):
        for segment in binding_active_str.split(';'):
            for token in segment.split(','):
                if token.strip().isdigit():
                    position = int(token) - 1  # 1-based -> 0-based
                    # Silently ignore positions outside the sequence.
                    if 0 <= position < sequence_len:
                        mask[position] = 1
    return mask

# Replace the site strings with binary masks aligned to each sequence.
test_df['Binding-Active site'] = test_df.apply(lambda row: convert_to_binary_list(row['Binding-Active site'], len(row['Sequence'])), axis=1)
train_df['Binding-Active site'] = train_df.apply(lambda row: convert_to_binary_list(row['Binding-Active site'], len(row['Sequence'])), axis=1)
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
EntryProtein familiesBinding siteActive siteSequenceBinding-Active site
791321A0A0C2CBT0TDD superfamily, TSR3 family; Protein kinase s...275; 323; 346NoneMFDVFSGHNDAVLCVQYRDQESLAVSGSADNSIKCWDTRTGRPEMT...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
1008964A0A0N4V212TDD superfamily, TSR3 family; Protein kinase s...131; 179; 202NoneMVGYGVRARASGYHGRSKFRVKNKRKADKSYAENVSELAADSSRAI...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
1009019A0A0N4XGU1TDD superfamily, TSR3 family; Protein kinase s...73; 121; 178NoneMGKKGREQHGNKRTNKSRHADAGDAEPLSSHGEEDSESLDESRDDH...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
1837901A0A1I8B1G5TDD superfamily, TSR3 family; Protein kinase s...40; 88; 111NoneMASTDSSQSSDEDAKVEKAKKMPCILAMFDFGQCDPKRCSGRKLCR...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
5447097A0A6V7USP8TDD superfamily, TSR3 family; Protein kinase s...61; 109; 132NoneMLFMVVPVLIMMQVDVVAIKKMTNTDSSESSGDDAVDDKSKKMPCI...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
\n", "
# Preview the first few rows of the test split (rich DataFrame display).
test_df.head()
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
EntryProtein familiesBinding siteActive siteSequenceBinding-Active site
1A0A009GI323-hydroxyacyl-CoA dehydrogenase family; Enoyl-...298; 326; 345; 402..404; 409; 431; 455; 502452MIHAGNAITVQMLADGIAEFRFDLQGESVNKFNRATIEDFKAAIAA...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
3A0A009HWM53-hydroxyacyl-CoA dehydrogenase family; Enoyl-...298; 326; 345; 402..404; 409; 431; 455; 502452MIHAGNAITVQMLADGIAEFRFDLQGESVNKFNRATIEDFKAAIAA...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
4A0A009I6Q13-hydroxyacyl-CoA dehydrogenase family; Enoyl-...298; 326; 345; 402..404; 409; 431; 455; 502452MIHAGNAITVQMLSDGIAEFRFDLQGESVNKFNRATIEDFQAAIAA...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
7A0A009NCR43-hydroxyacyl-CoA dehydrogenase family; Enoyl-...298; 326; 345; 402..404; 409; 431; 455; 502452MIHAGNAITVQMLSDGIAEFRFDLQGESVNKFNRATIEDFQAAIAA...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
9A0A009QK393-hydroxyacyl-CoA dehydrogenase family; Enoyl-...298; 326; 345; 402..404; 409; 431; 455; 502452MIHAGNAITVQMLADGIAEFRFDLQGESVNKFNRATIEDFKAAIAA...[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
\n", "
" ], "text/plain": [ " Entry Protein families \\\n", "1 A0A009GI32 3-hydroxyacyl-CoA dehydrogenase family; Enoyl-... \n", "3 A0A009HWM5 3-hydroxyacyl-CoA dehydrogenase family; Enoyl-... \n", "4 A0A009I6Q1 3-hydroxyacyl-CoA dehydrogenase family; Enoyl-... \n", "7 A0A009NCR4 3-hydroxyacyl-CoA dehydrogenase family; Enoyl-... \n", "9 A0A009QK39 3-hydroxyacyl-CoA dehydrogenase family; Enoyl-... \n", "\n", " Binding site Active site \\\n", "1 298; 326; 345; 402..404; 409; 431; 455; 502 452 \n", "3 298; 326; 345; 402..404; 409; 431; 455; 502 452 \n", "4 298; 326; 345; 402..404; 409; 431; 455; 502 452 \n", "7 298; 326; 345; 402..404; 409; 431; 455; 502 452 \n", "9 298; 326; 345; 402..404; 409; 431; 455; 502 452 \n", "\n", " Sequence \\\n", "1 MIHAGNAITVQMLADGIAEFRFDLQGESVNKFNRATIEDFKAAIAA... \n", "3 MIHAGNAITVQMLADGIAEFRFDLQGESVNKFNRATIEDFKAAIAA... \n", "4 MIHAGNAITVQMLSDGIAEFRFDLQGESVNKFNRATIEDFQAAIAA... \n", "7 MIHAGNAITVQMLSDGIAEFRFDLQGESVNKFNRATIEDFQAAIAA... \n", "9 MIHAGNAITVQMLADGIAEFRFDLQGESVNKFNRATIEDFKAAIAA... \n", "\n", " Binding-Active site \n", "1 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n", "3 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n", "4 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n", "7 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... \n", "9 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... 
# Preview the first few rows of the train split (rich DataFrame display).
train_df.head()

# ---- next cell: subsample, chunk to length <= 1000, and pickle the splits ----
import pickle
import random

# FIX: seed the sampler so the subsampling step is reproducible (the original
# used unseeded random.sample in a stochastic pipeline).
random.seed(42)

def split_into_chunks(sequences, labels):
    """Split each (sequence, labels) pair into chunks of at most 1000 positions.

    Pairs at or under the chunk size pass through unchanged; longer pairs are
    cut into aligned slices so sequence and label lists stay in sync.
    """
    chunk_size = 1000
    new_sequences = []
    new_labels = []

    for seq, lbl in zip(sequences, labels):
        if len(seq) > chunk_size:
            # Slice sequence and labels with identical offsets.
            for i in range(0, len(seq), chunk_size):
                new_sequences.append(seq[i:i+chunk_size])
                new_labels.append(lbl[i:i+chunk_size])
        else:
            new_sequences.append(seq)
            new_labels.append(lbl)

    return new_sequences, new_labels

# Flatten both frames into parallel lists of sequences and per-residue labels.
test_sequences_by_family = test_df['Sequence'].tolist()
test_labels_by_family = test_df['Binding-Active site'].tolist()
train_sequences_by_family = train_df['Sequence'].tolist()
train_labels_by_family = train_df['Binding-Active site'].tolist()

num_test_samples = len(test_sequences_by_family)
num_train_samples = len(train_sequences_by_family)

# FIX: the original comment claimed "6.00% of the data" while the value is
# 100 — the stale comment is corrected. At 100, random.sample over the full
# index range keeps every row (it only shuffles their order).
percentage_to_keep = 100

# Sample `percentage_to_keep` percent of each split's indices.
random_test_indices = random.sample(range(num_test_samples), int(num_test_samples * (percentage_to_keep / 100)))
random_train_indices = random.sample(range(num_train_samples), int(num_train_samples * (percentage_to_keep / 100)))

# Materialize the (sub)sampled datasets.
test_sequences_small = [test_sequences_by_family[i] for i in random_test_indices]
test_labels_small = [test_labels_by_family[i] for i in random_test_indices]
train_sequences_small = [train_sequences_by_family[i] for i in random_train_indices]
train_labels_small = [train_labels_by_family[i] for i in random_train_indices]

# Cut over-long entries into <=1000-length chunks.
test_sequences_chunked, test_labels_chunked = split_into_chunks(test_sequences_small, test_labels_small)
train_sequences_chunked, train_labels_chunked = split_into_chunks(train_sequences_small, train_labels_small)

# Output paths for the pickled datasets.
test_labels_chunked_path = '16M_data/test_labels_chunked_by_family.pkl'
test_sequences_chunked_path = '16M_data/test_sequences_chunked_by_family.pkl'
train_labels_chunked_path = '16M_data/train_labels_chunked_by_family.pkl'
train_sequences_chunked_path = '16M_data/train_sequences_chunked_by_family.pkl'

# Persist the chunked datasets.
with open(test_labels_chunked_path, 'wb') as file:
    pickle.dump(test_labels_chunked, file)
with open(test_sequences_chunked_path, 'wb') as file:
    pickle.dump(test_sequences_chunked, file)
with open(train_labels_chunked_path, 'wb') as file:
    pickle.dump(train_labels_chunked, file)
with open(train_sequences_chunked_path, 'wb') as file:
    pickle.dump(train_sequences_chunked, file)

# Display the written paths as the cell's output.
test_labels_chunked_path, test_sequences_chunked_path, train_labels_chunked_path, train_sequences_chunked_path
# Sanity check: reload each pickle written by the previous cell and count its
# entries. The displayed tuple should show equal sequence/label counts within
# each split. (These files were written by this notebook, so pickle.load on
# them is safe; never unpickle untrusted files.)
with open(test_labels_chunked_path, 'rb') as file:
    test_labels_chunked = pickle.load(file)
    num_test_labels_chunked = len(test_labels_chunked)

with open(test_sequences_chunked_path, 'rb') as file:
    test_sequences_chunked = pickle.load(file)
    num_test_sequences_chunked = len(test_sequences_chunked)

with open(train_labels_chunked_path, 'rb') as file:
    train_labels_chunked = pickle.load(file)
    num_train_labels_chunked = len(train_labels_chunked)

with open(train_sequences_chunked_path, 'rb') as file:
    train_sequences_chunked = pickle.load(file)
    num_train_sequences_chunked = len(train_sequences_chunked)

# Last expression -> rich display: (test labels, test seqs, train labels, train seqs).
num_test_labels_chunked, num_test_sequences_chunked, num_train_labels_chunked, num_train_sequences_chunked