"""Script for the model plants reference genomes dataset""" from typing import List import datasets from Bio import SeqIO import os _CITATION = "" _DESCRIPTION = """\ Dataset made of model plants genomes available on NCBI. Default configuration "6kbp" yields chunks of 6.2kbp (100bp overlap on each side). The chunks of DNA are cleaned and processed so that they can only contain the letters A, T, C, G and N. """ _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/" _LICENSE = "https://www.ncbi.nlm.nih.gov/home/about/policies/" _CHUNK_LENGTHS = [6000,] def filter_fn(char: str) -> str: """ Transforms any letter different from a base nucleotide into an 'N'. """ if char in {'A', 'T', 'C', 'G'}: return char else: return 'N' def clean_sequence(seq: str) -> str: """ Process a chunk of DNA to have all letters in upper and restricted to A, T, C, G and N. """ seq = seq.upper() seq = map(filter_fn, seq) seq = ''.join(list(seq)) return seq class PlantSingleSpeciesGenomesConfig(datasets.BuilderConfig): """BuilderConfig for the Plant Single Species Pre-training Dataset.""" def __init__(self, *args, chunk_length: int, overlap: int = 100, **kwargs): """BuilderConfig for the single species genome. Args: chunk_length (:obj:`int`): Chunk length. overlap: (:obj:`int`): Overlap in base pairs for two consecutive chunks (defaults to 100). **kwargs: keyword arguments forwarded to super. """ num_kbp = int(chunk_length/1000) super().__init__( *args, name=f'{num_kbp}kbp', **kwargs, ) self.chunk_length = chunk_length self.overlap = overlap class PlantSingleSpeciesGenomes(datasets.GeneratorBasedBuilder): """Genome from a single species, filtered and split into chunks of consecutive nucleotides.""" VERSION = datasets.Version("1.1.0") BUILDER_CONFIG_CLASS = PlantSingleSpeciesGenomesConfig BUILDER_CONFIGS = [PlantSingleSpeciesGenomesConfig(chunk_length=chunk_length) for chunk_length in _CHUNK_LENGTHS] DEFAULT_CONFIG_NAME = "6kbp" def _info(self): features = datasets.Features( { "sequence": datasets.Value("string"), "description": datasets.Value("string"), "start_pos": datasets.Value("int32"), "end_pos": datasets.Value("int32"), } ) return datasets.DatasetInfo( description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: # A.thaliana reference genome filepath = "GCF_000001735.4_TAIR10.1_genomic.fna.gz" downloaded_file = dl_manager.download_and_extract(filepath) return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file": downloaded_file, "chunk_length": self.config.chunk_length}) ] # method parameters are unpacked from `gen_kwargs` as given in `_split_generators` def _generate_examples(self, file, chunk_length): key = 0 with open(file, 'rt') as f: fasta_sequences = SeqIO.parse(f, 'fasta') for record in fasta_sequences: sequence, description = str(record.seq), record.description # clean chromosome sequence sequence = clean_sequence(sequence) seq_length = len(sequence) # split into chunks num_chunks = (seq_length - 2 * self.config.overlap) // chunk_length if num_chunks < 1: continue sequence = sequence[:(chunk_length * num_chunks + 2 * self.config.overlap)] seq_length = len(sequence) for i in range(num_chunks): # get chunk start_pos = i * chunk_length end_pos = min(seq_length, (i+1) * chunk_length + 2 * self.config.overlap) chunk_sequence = sequence[start_pos:end_pos] # yield chunk yield key, { 'sequence': chunk_sequence, 'description': description, 'start_pos': start_pos, 
'end_pos': end_pos, } key += 1
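

# ---------------------------------------------------------------------------
# Usage sketch, not part of the builder itself. It assumes this script and the
# compressed FASTA file "GCF_000001735.4_TAIR10.1_genomic.fna.gz" sit in the
# same directory (or the same dataset repository) so that the relative path
# used in `_split_generators` resolves. Depending on the installed `datasets`
# version, loading script-based datasets may additionally require
# `trust_remote_code=True` or may no longer be supported.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the default "6kbp" configuration; each example is a 6,200 bp chunk
    # (a 6,000 bp chunk plus a 100 bp overlap on each side).
    dataset = load_dataset(__file__, name="6kbp", split="train")
    example = dataset[0]
    print(example["description"], example["start_pos"], example["end_pos"])
    print(len(example["sequence"]))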