# See the License for the specific language governing permissions and # limitations under the License. import os from typing import List import datasets _CITATION = '''\ @article{mendoza2023foundational, title={A Foundational Large Language Model for Edible Plant Genomes}, author={Mendoza-Revilla, Javier and Trop, Evan and Gonzalez, Liam and Roller, Masa and Dalla-Torre, Hugo and de Almeida, Bernardo P and Richard, Guillaume and Caton, Jonathan and Lopez Carranza, Nicolas and Skwark, Marcin and others}, journal={bioRxiv}, pages={2023--10}, year={2023}, publisher={Cold Spring Harbor Laboratory} } ''' _DESCRIPTION = """\ This dataset comprises the various supervised learning tasks considered in the agro-nt paper. The task types include binary classification,multi-label classification, regression,and multi-output regression. The actual underlying genomic tasks range from predicting regulatory features, RNA processing sites, and gene expression values. """ _LICENSE = "https://huggingface.co/datasets/InstaDeepAI/plant-genomic-benchmark/blob/main/LICENSE.md" _TASK_NAMES = ['poly_a.arabidopsis_thaliana', 'poly_a.oryza_sativa_indica_group', 'poly_a.trifolium_pratense', 'poly_a.medicago_truncatula', 'poly_a.chlamydomonas_reinhardtii', 'poly_a.oryza_sativa_japonica_group', 'splicing.arabidopsis_thaliana_donor', 'splicing.arabidopsis_thaliana_acceptor', 'lncrna.m_esculenta', 'lncrna.z_mays', 'lncrna.g_max', 'lncrna.s_lycopersicum', 'lncrna.t_aestivum', 'lncrna.s_bicolor', 'promoter_strength.leaf', 'promoter_strength.protoplast', 'terminator_strength.leaf', 'terminator_strength.protoplast', 'gene_exp.glycine_max', 'gene_exp.oryza_sativa', 'gene_exp.solanum_lycopersicum', 'gene_exp.zea_mays', 'gene_exp.arabidopsis_thaliana', 'chromatin_access.oryza_sativa_MH63_RS2', 'chromatin_access.setaria_italica', 'chromatin_access.oryza_sativa_ZS97_RS2', 'chromatin_access.arabidopis_thaliana', 'chromatin_access.brachypodium_distachyon', 'chromatin_access.sorghum_bicolor', 'chromatin_access.zea_mays', 'pro_seq.m_esculenta'] _TASK_INFO = {'poly_a':{'type': 'binary', 'val_set':False}, 'splicing':{'type': 'binary', 'val_set':False}, 'lncrna':{'type': 'binary', 'val_set':False}, 'promoter_strength': {'type': 'regression', 'val_set': True}, 'terminator_strength': {'type': 'regression', 'val_set': True}, 'gene_exp':{'type':'multi_regression','val_set':True}, 'chromatin_access':{'type':'multi_label','val_set':True}, 'pro_seq':{'type':'binary','val_set':True} } # This function is a basic reimplementation of SeqIO's parse method. This allows the # dataset viewer to work as it does not require an external package. def parse_fasta(fp): name, seq = None, [] for line in fp: line = line.rstrip() if line.startswith(">"): if name: # Slice to remove '>' yield (name[1:], "".join(seq)) name, seq = line, [] else: seq.append(line) if name: # Slice to remove '>' yield (name[1:], "".join(seq)) class AgroNtTasksConfig(datasets.BuilderConfig): """BuilderConfig for the Agro NT supervised learning tasks dataset.""" def __init__(self, *args, task_name: str, **kwargs): """BuilderConfig downstream tasks dataset. Args: task (:obj:`str`): Task name. **kwargs: keyword arguments forwarded to super. """ self.task,self.sub_task = task_name.split(".") self.task_type = _TASK_INFO[self.task]['type'] self.val_set = _TASK_INFO[self.task]['val_set'] super().__init__( *args, name=f"{task_name}", **kwargs, ) class AgroNtTasks(datasets.GeneratorBasedBuilder): """GeneratorBasedBuilder for the Agro NT supervised learning tasks dataset.""" BUILDER_CONFIG_CLASS = AgroNtTasksConfig VERSION = datasets.Version("1.1.0") BUILDER_CONFIGS = [AgroNtTasksConfig(task_name=TASK_NAME) for TASK_NAME in _TASK_NAMES] def _info(self): feature_dit = {"sequence": datasets.Value("string"), "name": datasets.Value("string")} if self.config.task_type == 'binary': feature_dit["label"] = datasets.Value("int8") elif self.config.task_type == 'regression': feature_dit["label"] = datasets.Value("float32") elif self.config.task_type == 'multi_regression': feature_dit['labels'] = [datasets.Value("float32")] elif self.config.task_type == 'multi_label': feature_dit['labels'] = [datasets.Value("int8")] features = datasets.Features(feature_dit) return datasets.DatasetInfo( # This is the description that will appear on the datasets page. description=_DESCRIPTION, # This defines the different columns of the dataset and their types features=features, # License for the dataset if available license=_LICENSE, # Citation for the dataset citation=_CITATION, ) def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]: train_file = dl_manager.download_and_extract( os.path.join(self.config.task, self.config.sub_task + "_train.fa")) test_file = dl_manager.download_and_extract( os.path.join(self.config.task, self.config.sub_task + "_test.fa")) generator_list = [ datasets.SplitGenerator( name=datasets.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": train_file, }, ), datasets.SplitGenerator( name=datasets.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": test_file, }, ), ] if self.config.val_set: validation_file = dl_manager.download_and_extract( os.path.join(self.config.task, self.config.sub_task + "_validation.fa")) generator_list += datasets.SplitGenerator( name=datasets.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": validation_file, }, ), return generator_list # method parameters are unpacked from `gen_kwargs` as given in `_split_generators` def _generate_examples(self, filepath): key = 0 with open(filepath, "rt") as f: fasta_sequences = parse_fasta(f) for name, seq in fasta_sequences: # Yields examples as (key, example) tuples sequence, name = str(seq), str(name) split_name = name.split("|") name = split_name[0] labels = split_name[1:] if 'multi' in self.config.task_type: yield key, { "sequence": sequence, "name": name, "labels": labels } else: yield key, { "sequence": sequence, "name": name, "label": labels[0], } key += 1