true-cds-protein-tasks / true-cds-protein-tasks.py
sboshar's picture
Upload 20 files
41c285e verified
raw
history blame contribute delete
No virus
5.38 kB
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
import datasets
def parse_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name:
# Slice to remove '>'
yield (name[1:], "".join(seq))
name, seq = line, []
else:
seq.append(line)
if name:
# Slice to remove '>'
yield (name[1:], "".join(seq))
_CITATION = """\
@article{boshar2024gLMsForProteins,
title={Are Genomic Language Models All You Need? Exploring Genomic Language Models on Protein Downstream Tasks},
author={Sam Boshar, Evan Trop, Bernardo P. de Almeida, Lviua Copoiu, Thomas Pierrot},
journal={bioRxiv},
pages={},
year={2024},
publisher={}
}
'''
"""
# You can copy an official description
_DESCRIPTION = """\
This dataset comprises 5 downstream protein tasks with associated true CDS sequences considered in the paper. The tasks include five which are regression, and one which is multi-label classification. Each task corresponds to a dataset configuration.
"""
_HOMEPAGE = "https://github.com/instadeepai/gLMs-for-proteins"
_LICENSE = "https://github.com/instadeepai/nucleotide-transformer/LICENSE.md"
_TASKS = ['beta_lactamase_complete',
'beta_lactamase_unique',
'ssp',
'stability',
'melting_point',
'fluorescence'
]
class ProteinTrueCDSConfig(datasets.BuilderConfig):
"""BuilderConfig for protein True CDS tasks."""
def __init__(self, *args, task: str, **kwargs):
"""BuilderConfig downstream tasks dataset.
Args:
task (:obj:`str`): Task name.
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(
*args,
name=f"{task}",
**kwargs,
)
self.task = task
class ProteinTrueCDSDownstreamTasks(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIG_CLASS = ProteinTrueCDSConfig
BUILDER_CONFIGS = [
ProteinTrueCDSConfig(task=task) for task in _TASKS
]
# DEFAULT_CONFIG_NAME = "enhancers"
def _info(self):
if self.config.task == 'ssp':
label_type = datasets.Sequence(datasets.Value("int32"))
else:
label_type = datasets.Value("float32")
features = datasets.Features(
{
"sequence": datasets.Value("string"),
"label": label_type,
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(
self, dl_manager: datasets.DownloadManager
) -> List[datasets.SplitGenerator]:
if self.config.task == 'ssp':
train_file = dl_manager.download_and_extract(self.config.task + "/train.fna")
train_dataset = datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"file": train_file}
)
test_datasets = [
datasets.SplitGenerator(
name=name, gen_kwargs={
"file": dl_manager.download_and_extract(self.config.task + f"/{name}.fna")
}
) for name in ['CASP12', 'CB513', 'TS115']]
return [train_dataset] + test_datasets
else:
val_file = dl_manager.download_and_extract(self.config.task + "/val.fna")
train_file = dl_manager.download_and_extract(self.config.task + "/train.fna")
test_file = dl_manager.download_and_extract(self.config.task + "/test.fna")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"file": train_file}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"file": test_file}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION, gen_kwargs={"file": val_file}
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, file):
key = 0
with open(file, "rt") as f:
fasta_sequences = parse_fasta(f)
for name, seq in fasta_sequences:
# parse descriptions in the fasta file
sequence, name = str(seq), str(name)
if self.config.task != 'ssp':
label = float(name.split("|")[-1])
else:
label = [int(i) for i in name.split("|")[-1]]
# yield example
yield key, {
"sequence": sequence,
"label": label,
}
key += 1