maom committed
Commit 1593542 · 1 Parent(s): 9768260

Update curation pipeline

.gitignore CHANGED
@@ -1 +1,3 @@
- *~
+ *~
+ data
+ intermediate
src/00_setup_curation.sh CHANGED
@@ -4,14 +4,11 @@
 
  # from a base directory
 
-
- mkdir data
- mkdir intermediate
-
  git clone https://<user_name>:<security_token>@huggingface.co/RosettaCommons/MegaScale
 
+
  # needed to get splits
- git clone https://github.com/Kuhlman-Lab/ThermoMPNN.git
+ cd data && git clone https://github.com/Kuhlman-Lab/ThermoMPNN.git
 
 
 
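The later scripts assume this layout rather than creating it: the ThermoMPNN clone under data/, the MegaScale archive in data/, and an intermediate/ output directory. A minimal sanity-check sketch, not part of this commit, with the paths taken from the scripts below:

import pathlib

# Paths that the curation scripts below expect to exist.
expected_paths = [
    "data/ThermoMPNN/dataset_splits/mega_splits.pkl",  # ThermoMPNN split assignments
    "data/Processed_K50_dG_datasets.zip",              # MegaScale processed K50/dG archive
    "intermediate",                                    # output directory for parquet files
]

for path in expected_paths:
    status = "ok" if pathlib.Path(path).exists() else "MISSING"
    print(f"{status:8s}{path}")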
src/02.1_gather_ThermoMPNN_splits.py ADDED
@@ -0,0 +1,71 @@
+
+
+ import pandas
+ import pyarrow.parquet
+ import pickle
+
+
+ # Gabe requested that the splits of the MegaScale dataset defined in ThermoMPNN be used as the default splits
+
+ # ThermoMPNN/datasets.py
+ # class MegaScaleDataset
+ #     def __init__(self, cfg, split):
+ #         fname = self.cfg.data_loc.megascale_csv
+ #         df = pd.read_csv(fname, usecols=["ddG_ML", "mut_type", "WT_name", "aa_seq", "dG_ML"])
+ #
+ #         # remove unreliable data and more complicated mutations
+ #         df = df.loc[df.ddG_ML != '-', :].reset_index(drop=True)
+ #         df = df.loc[
+ #             ~df.mut_type.str.contains("ins") &
+ #             ~df.mut_type.str.contains("del") &
+ #             ~df.mut_type.str.contains(":"), :].reset_index(drop=True)
+ #
+ #         splits = <load from self.cfg.data_loc.megascale_splits>
+ #
+ #         if self.split != 'all' and (cfg.reduce != 'prot' or self.split != 'train'):
+ #             self.wt_names = splits[split]
+
+ # local.yaml
+ # data_loc:
+ #     megascale_csv: "<truncated>/Processed_K50_dG_datasets/Tsuboyama2023_Dataset2_Dataset3_20230416.csv"
+
+
+ with open("data/ThermoMPNN/dataset_splits/mega_splits.pkl", "rb") as f:
+     mega_splits = pickle.load(f)
+
+ splits = []
+ for split_name, split_ids in mega_splits.items():
+     splits.append(
+         pandas.DataFrame({
+             'split_name': split_name,
+             'id': split_ids}))
+
+ splits = pandas.concat(splits)
+ splits.reset_index(drop=True, inplace=True)
+
+ pyarrow.parquet.write_table(
+     pyarrow.Table.from_pandas(splits),
+     where = "intermediate/ThermoMPNN_splits.parquet")
+
+
+ parquet_file = pyarrow.parquet.ParquetFile('intermediate/ThermoMPNN_splits.parquet')
+ parquet_file.metadata
+ # <pyarrow._parquet.FileMetaData object at 0x149f5d2667a0>
+ #   created_by: parquet-cpp-arrow version 17.0.0
+ #   num_columns: 2
+ #   num_rows: 2020
+ #   num_row_groups: 1
+ #   format_version: 2.6
+ #   serialized_size: 1881
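The resulting parquet table has one row per (split_name, id) pair, where id is the wild-type domain name matched against WT_name downstream. A short sketch, not part of the commit, of how a consumer could read it back and summarize the splits; the split names (train, val, test, and the numbered cross-validation folds) are whatever keys mega_splits.pkl provides:

import pandas

# Read back the split table written above.
splits = pandas.read_parquet("intermediate/ThermoMPNN_splits.parquet")

# Number of wild-type domains assigned to each split.
print(splits.groupby("split_name")["id"].nunique())

# Recover the domain list for one split, e.g. the test split.
test_ids = splits.loc[splits.split_name == "test", "id"].tolist()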
src/{02.1_assemble_K50_dG_dataset.R → 02.2_assemble_K50_dG_dataset.R} RENAMED
@@ -1,7 +1,14 @@
+
+
  system("cd data; unzip Processed_K50_dG_datasets.zip")
 
 
+ ThermoMPNN_splits <- arrow::read_parquet("intermediate/ThermoMPNN_splits.parquet")
+
+
  ### Dataset1 ###
+
+ # Dataset1 consists of all cDNA proteolysis measurements of stability
  dataset1 <- readr::read_csv(
      file = "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset1_20230416.csv",
      col_types = readr::cols(
@@ -43,8 +50,65 @@ dataset1 |>
 
 
 
+ ### Dataset2 and Dataset3 ###
+
+ # Dataset2 (for dG ML) consists of cDNA proteolysis measurements of stability that are of class G0 + G1
+ # Dataset3 (for ddG ML) consists of cDNA proteolysis measurements of stability that are of class G0
+ # G0: Good (wild-type ΔG values below 4.75 kcal mol^−1), 325,132 ΔG measurements at 17,093 sites in 365 domains
+ # G1: Good, but the wild type is outside the dynamic range
+
  dataset23 <- readr::read_csv(
-     file = "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset2_Dataset3_20230416.csv"
+     file = "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset2_Dataset3_20230416.csv",
+     show_col_types = FALSE)
+ # 776,298 rows
+
+ dataset23 |>
+     arrow::write_parquet(
+         "intermediate/dataset23.parquet")
+
+ dataset3 <- dataset23 |>
+     dplyr::filter(ddG_ML != "-")
+
+ dataset3_single_mutant <- dataset3 |>
+     dplyr::filter(!(mut_type |> stringr::str_detect("(ins|del|[:])")))
+
+
+ ThermoMPNN_splits |> dplyr::group_by(split_name) |>
+     dplyr::do({
+         split <- .
+         split_name <- split$split_name[1]
+         mutant_set <- dataset3_single_mutant |>
+             dplyr::filter(mut_type != "wt") |>
+             dplyr::semi_join(split, by = c("WT_name" = "id"))
+         cat("Writing out split ", split_name, ", nrow: ", nrow(mutant_set), "\n", sep = "")
+
+         arrow::write_parquet(
+             x = mutant_set,
+             sink = paste0("intermediate/dataset3_ThermoMPNN_", split_name, ".parquet"))
+         data.frame()
+     })
+
+
+ dataset3_single_mutant_train <- dataset3_single_mutant |>
+     dplyr::filter(mut_type != "wt") |>
+     dplyr::semi_join(
+         ThermoMPNN_splits |>
+             dplyr::filter(split_name == "train"),
+         by = c("WT_name" = "id"))
+
+ dataset3_single_mutant_val <- dataset3_single_mutant |>
+     dplyr::filter(mut_type != "wt") |>
+     dplyr::semi_join(
+         ThermoMPNN_splits |>
+             dplyr::filter(split_name == "val"),
+         by = c("WT_name" = "id"))
+
+ dataset3_single_mutant_test <- dataset3_single_mutant |>
+     dplyr::filter(mut_type != "wt") |>
+     dplyr::semi_join(
+         ThermoMPNN_splits |>
+             dplyr::filter(split_name == "test"),
+         by = c("WT_name" = "id"))
 
 
  ####
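The filtering above mirrors the ThermoMPNN preprocessing quoted in 02.1_gather_ThermoMPNN_splits.py: keep rows with a usable ddG_ML, drop insertions, deletions, and multi-mutants, drop wild-type rows, then restrict to the WT_name values assigned to each split. A rough Python cross-check for a single split, assuming the intermediate parquet files written above exist; illustrative only, not part of the commit:

import pandas

# Full Dataset2/Dataset3 table and the ThermoMPNN split assignments written earlier.
dataset23 = pandas.read_parquet("intermediate/dataset23.parquet")
splits = pandas.read_parquet("intermediate/ThermoMPNN_splits.parquet")

# Same filters as the R pipeline: reliable ddG_ML, single substitutions only, no wild-type rows.
single_mutants = dataset23[
    (dataset23.ddG_ML != "-")
    & ~dataset23.mut_type.str.contains("ins|del|:")
    & (dataset23.mut_type != "wt")
]

# Restrict to the wild-type domains assigned to the ThermoMPNN test split.
test_ids = set(splits.loc[splits.split_name == "test", "id"])
test_set = single_mutants[single_mutants.WT_name.isin(test_ids)]

# Row count should match intermediate/dataset3_ThermoMPNN_test.parquet.
print(len(test_set))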
src/02.8_gather_ThermoMPNN_splits.py DELETED
@@ -1,20 +0,0 @@
-
-
- import pandas
- import pyarrow.parquet
- import pickle
-
- with open("ThermoMPNN/dataset_splits/mega_splits.pkl", "rb") as f:
-     mega_splits = pickle.load(f)
-
- splits = []
- for split_name, split_ids in mega_splits.items():
-     splits.append(
-         pandas.DataFrame({
-             'split_name': split_name,
-             'id': split_ids}))
-
- splits = pandas.concat(splits)
- pyarrow.parquet.write_table(
-     pyarrow.Table.from_pandas(splits),
-     where = "intermediate/ThermoMPNN_splits.parquet")
src/03.1_upload_data.py ADDED
@@ -0,0 +1,79 @@
+
+
+
+ # install huggingface_hub from the command line:
+ #
+ #     pip install huggingface_hub
+ #     pip install datasets
+ #
+ # Log into the Hugging Face Hub (this only needs to be done once per project, and then it is cached):
+ #
+ #     huggingface-cli login
+ #
+ # This will ask you for an access token
+
+
+ import datasets
+
+
+ # Dataset1
+ # Dataset2
+ # Dataset3
+ # Single Mutants
+ #
+
+ # dataset1
+ # dataset2
+ # dataset3
+ # dataset3_single
+ # dataset3_single_CV
+
+
+
+
+ ##### dataset3_single #######
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "dataset3_single",
+     data_dir = "./intermediate",
+     data_files = {
+         "train" : "dataset3_ThermoMPNN_train.parquet",
+         "val" : "dataset3_ThermoMPNN_val.parquet",
+         "test" : "dataset3_ThermoMPNN_test.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "maom/MegaScale",
+     config_name = "dataset3_single",
+     data_dir = "dataset3_single/data")
+
+
+ ##### dataset3_single_CV #######
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "dataset3_single_CV",
+     data_dir = "./intermediate",
+     data_files = {
+         "train_0" : "dataset3_ThermoMPNN_train_0.parquet",
+         "train_1" : "dataset3_ThermoMPNN_train_1.parquet",
+         "train_2" : "dataset3_ThermoMPNN_train_2.parquet",
+         "train_3" : "dataset3_ThermoMPNN_train_3.parquet",
+         "train_4" : "dataset3_ThermoMPNN_train_4.parquet",
+         "val_0" : "dataset3_ThermoMPNN_val_0.parquet",
+         "val_1" : "dataset3_ThermoMPNN_val_1.parquet",
+         "val_2" : "dataset3_ThermoMPNN_val_2.parquet",
+         "val_3" : "dataset3_ThermoMPNN_val_3.parquet",
+         "val_4" : "dataset3_ThermoMPNN_val_4.parquet",
+         "test_0" : "dataset3_ThermoMPNN_test_0.parquet",
+         "test_1" : "dataset3_ThermoMPNN_test_1.parquet",
+         "test_2" : "dataset3_ThermoMPNN_test_2.parquet",
+         "test_3" : "dataset3_ThermoMPNN_test_3.parquet",
+         "test_4" : "dataset3_ThermoMPNN_test_4.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "MaomLab/MegaScale",
+     config_name = "dataset3_single_CV",
+     data_dir = "dataset3_single_CV/data")
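Once the pushes complete, the configurations should be loadable directly from the Hub. A minimal usage sketch, assuming the pushes above succeeded and the repo ids and config names are as written in the script:

import datasets

# Load the single-mutant configuration pushed above (repo id as used in the script).
dataset3_single = datasets.load_dataset("maom/MegaScale", name = "dataset3_single")
print(dataset3_single)  # DatasetDict with train / val / test splits

# Load one cross-validation fold of the CV configuration.
train_0 = datasets.load_dataset("MaomLab/MegaScale", name = "dataset3_single_CV", split = "train_0")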