commit dd4dc1f (parent: 8e17b21)
Author: EC2 Default User

    update parquet
README.md CHANGED
@@ -23,6 +23,6 @@ configs:
   - https://opig.stats.ox.ac.uk/webapps/oas/oas_paired/
 
 
-### Dataset Summary
+## Dataset Summary
 
 Paired heavy- and light-chain sequence information from the Observed Antibody Space (OAS) database, downloaded on September 9, 2023.
human.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:15f68167648ede70b384eb9671c162617a251bf47ce758702ce6912c2cbfe05b
-size 60616532
+oid sha256:385678cd991b8639196d97b51f1f2396d32ced223bdacd60ffb806cf3b568701
+size 92818695
mouse_BALB_c.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e0991362b6f59e1fec7751d2696db0d4ef4ab6c609a9d87d98c8ec6061df705
-size 319404
+oid sha256:aadc6d7524d60d80109143772bc307bae06b8aabace45c87bdd2ca3aa7dd777f
+size 586744
mouse_C57BL_6.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:33b0a43b2d4bcc9e24d6d1f5c747dbc9a21b4e46220da6786f0fda6e024b6f35
-size 90331
+oid sha256:f46d6e00e6bde03b91675d2ba03dcb3c6f67c944ae8dd9606a6c373a07c61ae3
+size 174681
rat_SD.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1dd3a2a45f22e9e87cc13891f2db9c9e74129c23b7e8b94eda81fa1423863c40
-size 1082597
+oid sha256:5b60b14ea7fe3c0216f8640b6cacd1e63543e15663e308d6998a8c6a0eac87e8
+size 2053567
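The `.parquet` entries above are Git LFS pointers rather than the data itself: each records only the object's SHA-256 (`oid`) and byte size, and the new pointers show each file roughly doubling in size after the CDR columns were added. A downloaded copy can be checked against its pointer with a short sketch like the following (the local path is an assumption):

```python
import hashlib
import os

# Check a downloaded LFS object against its pointer. The local path is an
# assumption; the oid and size come from the new human.parquet pointer above.
path = "human.parquet"
expected_oid = "385678cd991b8639196d97b51f1f2396d32ced223bdacd60ffb806cf3b568701"
expected_size = 92818695

digest = hashlib.sha256()
with open(path, "rb") as f:
    # Stream in 1 MiB chunks so the file never has to fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size does not match pointer"
assert digest.hexdigest() == expected_oid, "sha256 does not match pointer"
print(f"{path} matches its LFS pointer")
```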
src/download_and_process_data.py CHANGED
@@ -9,9 +9,7 @@ species_list = ["human", "rat_SD", "mouse_BALB_c", "mouse_C57BL_6"]
 
 for species in species_list:
     print(f"Downloading {species} files")
-    species_df = pd.DataFrame(
-        columns=["run_id", "sequence_alignment_aa_heavy", "sequence_alignment_aa_light"]
-    )
+    list_of_df = []
     species_url_file = os.path.join(data_dir, species + "_oas_paired.txt")
     with open(species_url_file, "r") as f:
         for csv_file in f.readlines():
@@ -27,15 +25,24 @@
             run_data = run_data[
                 [
                     "sequence_alignment_aa_heavy",
+                    "cdr1_aa_heavy",
+                    "cdr2_aa_heavy",
+                    "cdr3_aa_heavy",
                     "sequence_alignment_aa_light",
+                    "cdr1_aa_light",
+                    "cdr2_aa_light",
+                    "cdr3_aa_light",
                 ]
             ]
-            run_data.insert(0, "run_id", run_id)
-            species_df = pd.concat([species_df, run_data])
+            run_data.insert(
+                0, "pair_id", run_id + "_" + run_data.reset_index().index.map(str)
+            )
+            list_of_df.append(run_data)
+    species_df = pd.concat(list_of_df, ignore_index=True)
     print(f"{species} output summary:")
     print(species_df.head())
     print(species_df.shape)
     output_file_name = os.path.join(output_path, species + ".parquet")
     print(f"Creating {output_file_name}")
-    species_df.to_parquet(output_file_name, compression="gzip")
+    species_df.to_parquet(output_file_name, compression="gzip", index=False)
 
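Besides adding the six CDR columns, this change replaces the grow-by-concat pattern (re-concatenating into `species_df` on every iteration, which recopies all previously accumulated rows each time) with a list of per-run frames and a single `pd.concat` at the end, and swaps the per-run `run_id` column for a row-unique `pair_id`. A minimal sketch of the pattern, with made-up run ids and sequences:

```python
import pandas as pd

# Toy illustration of the accumulate-then-concat pattern adopted above:
# collect per-run frames in a list and concatenate once after the loop.
# The run ids and sequences here are invented for the example.
list_of_df = []
for run_id, seqs in [("run_A", ["QVQLV", "EVQLL"]), ("run_B", ["QVKLE"])]:
    run_data = pd.DataFrame({"sequence_alignment_aa_heavy": seqs})
    # pair_id = "<run_id>_<row position>" gives every paired sequence a unique key.
    run_data.insert(
        0, "pair_id", run_id + "_" + run_data.reset_index().index.map(str)
    )
    list_of_df.append(run_data)

species_df = pd.concat(list_of_df, ignore_index=True)
print(species_df)
#    pair_id sequence_alignment_aa_heavy
# 0  run_A_0                       QVQLV
# 1  run_A_1                       EVQLL
# 2  run_B_0                       QVKLE
```

Writing with `index=False` then drops the now-redundant integer index from the parquet output, since `pair_id` already identifies each row.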
src/oas-paired-sequence-data.py DELETED
@@ -1,162 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
-"""Paired sequences from the Observed Antibody Space database"""
-
-
-import csv
-import json
-import os
-
-import datasets
-
-_CITATION = """\
-@article{Olsen_Boyles_Deane_2022,
-  title={Observed Antibody Space: A diverse database of cleaned, annotated, and translated unpaired and paired antibody sequences},
-  volume={31}, rights={© 2021 The Authors. Protein Science published by Wiley Periodicals LLC on behalf of The Protein Society.},
-  ISSN={1469-896X}, DOI={10.1002/pro.4205},
-  number={1}, journal={Protein Science}, author={Olsen, Tobias H. and Boyles, Fergus and Deane, Charlotte M.},
-  year={2022}, pages={141–146}, language={en} }
-
-"""
-_DESCRIPTION = """\
-Paired heavy and light chain antibody sequences for multiple species.
-"""
-
-_HOMEPAGE = "https://opig.stats.ox.ac.uk/webapps/oas/"
-
-_LICENSE = "cc-by-4.0"
-
-_URL = "https://aws-hcls-ml.s3.amazonaws.com/oas-paired-sequence-data/"
-
-_URLS = {
-    "human": _URL + "human.zip",
-    "rat_SD": _URL + "rat_SD.zip",
-    "mouse_BALB_c": _URL + "mouse_BALB_c.zip",
-    "mouse_C57BL_6": _URL + "mouse_C57BL_6.zip",
-}
-
-_FEATURES = datasets.Features(
-    {
-        "sequence_id_heavy": datasets.Value("string"),
-        "sequence_heavy": datasets.Value("string"),
-        "locus_heavy": datasets.Value("string"),
-        "stop_codon_heavy": datasets.Value("string"),
-        "productive_heavy": datasets.Value("string"),
-        "rev_comp_heavy": datasets.Value("string"),
-        "sequence_alignment_aa_heavy": datasets.Value("string"),
-        "fwr1_aa_heavy": datasets.Value("string"),
-        "cdr1_aa_heavy": datasets.Value("string"),
-        "fwr2_aa_heavy": datasets.Value("string"),
-        "cdr2_aa_heavy": datasets.Value("string"),
-        "fwr3_aa_heavy": datasets.Value("string"),
-        "cdr3_aa_heavy": datasets.Value("string"),
-        "junction_aa_heavy": datasets.Value("string"),
-        "sequence_id_light": datasets.Value("string"),
-        "sequence_light": datasets.Value("string"),
-        "locus_light": datasets.Value("string"),
-        "stop_codon_light": datasets.Value("string"),
-        "productive_light": datasets.Value("string"),
-        "rev_comp_light": datasets.Value("string"),
-        "sequence_alignment_aa_light": datasets.Value("string"),
-        "fwr1_aa_light": datasets.Value("string"),
-        "cdr1_aa_light": datasets.Value("string"),
-        "fwr2_aa_light": datasets.Value("string"),
-        "cdr2_aa_light": datasets.Value("string"),
-        "fwr3_aa_light": datasets.Value("string"),
-        "cdr3_aa_light": datasets.Value("string"),
-        "junction_aa_light": datasets.Value("string"),
-    }
-)
-
-class OasPairedSequenceData(datasets.GeneratorBasedBuilder):
-    """OAS paired sequence data."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="human", version=VERSION, description="Human"),
-        datasets.BuilderConfig(name="rat_SD", version=VERSION, description="rat_SD"),
-        datasets.BuilderConfig(
-            name="mouse_BALB_c", version=VERSION, description="mouse_BALB_c"
-        ),
-        datasets.BuilderConfig(
-            name="mouse_C57BL_6", version=VERSION, description="mouse_C57BL_6"
-        ),
-    ]
-
-    def _info(self):
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=_FEATURES,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    # "filepath": os.path.join(data_dir, "train.jsonl"),
-                    "filepath": data_dir,
-                    "split": "train",
-                },
-            ),
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.VALIDATION,
-            #     # These kwargs will be passed to _generate_examples
-            #     gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "dev.jsonl"),
-            #         "split": "dev",
-            #     },
-            # ),
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.TEST,
-            #     # These kwargs will be passed to _generate_examples
-            #     gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "test.jsonl"),
-            #         "split": "test",
-            #     },
-            # ),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                yield key, data
-                # yield key, {
-                #     "sentence": data["sentence"],
-                #     "option2": data["option2"],
-                #     "second_domain_answer": ""
-                # }
-
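With this largely unmodified template loader deleted, the dataset is presumably consumed straight from the parquet files (the `configs:` block in the README frontmatter, visible in the hunk header above, is what drives the Hub's parquet-based loading). A minimal sketch of loading one species file, assuming a locally downloaded copy of `human.parquet` and the columns written by the updated processing script:

```python
import pandas as pd
from datasets import load_dataset

# Option 1: the generic "parquet" builder in `datasets` reads the file directly,
# with no custom loading script required.
ds = load_dataset("parquet", data_files={"train": "human.parquet"})
print(ds["train"].column_names)

# Option 2: plain pandas, selecting columns the processing script now writes.
df = pd.read_parquet("human.parquet")
print(df[["pair_id", "cdr3_aa_heavy", "cdr3_aa_light"]].head())
```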