# ten_species/ten_species.py
from typing import List
import datasets
import pandas as pd
from Bio import SeqIO
_CHUNK_LENGTHS = [16384, 32768]
def filter_fn(char: str) -> str:
"""
Transforms any letter different from a base nucleotide into an 'N'.
"""
if char in {'A', 'T', 'C', 'G'}:
return char
else:
return 'N'
def clean_sequence(seq: str) -> str:
"""
    Process a chunk of DNA so that all letters are uppercase and restricted to
    A, T, C, G and N.
"""
seq = seq.upper()
seq = map(filter_fn, seq)
seq = ''.join(list(seq))
return seq
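
# e.g. clean_sequence("acgtryn") -> "ACGTNNN": letters are upper-cased and anything
# other than A, T, C or G (including ambiguity codes such as R or Y) becomes 'N'.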
class TenSpeciesGenomesConfig(datasets.BuilderConfig):
"""BuilderConfig for The Human Reference Genome."""
def __init__(self, *args, chunk_length: int, overlap: int = 0, **kwargs):
"""BuilderConfig for the multi species genomes.
Args:
chunk_length (:obj:`int`): Chunk length.
            overlap (:obj:`int`): Overlap in base pairs for two consecutive chunks (defaults to 0).
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(
*args,
name=f'{chunk_length}bp',
**kwargs,
)
self.chunk_length = chunk_length
self.overlap = overlap
class TenSpeciesGenomes(datasets.GeneratorBasedBuilder):
"""Genomes from 10 species, filtered and split into chunks of consecutive nucleotides.
Species include:
- Homo_sapiens
- Mus_musculus
- Drosophila_melanogaster
- Danio_rerio
- Caenorhabditis_elegans
- Gallus_gallus
- Gorilla_gorilla
- Felis_catus
- Salmo_trutta
- Arabidopsis_thaliana
"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIG_CLASS = TenSpeciesGenomesConfig
BUILDER_CONFIGS = [TenSpeciesGenomesConfig(chunk_length=chunk_length) for chunk_length in _CHUNK_LENGTHS]
DEFAULT_CONFIG_NAME = "32768bp"
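    # '32768bp' is the config name that TenSpeciesGenomesConfig auto-generates for chunk_length=32768.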
def _info(self):
features = datasets.Features(
{
"sequence": datasets.Value("string"),
"species_label": datasets.ClassLabel(
num_classes=10,
names=['Homo_sapiens', 'Mus_musculus', 'Drosophila_melanogaster', 'Danio_rerio',
'Caenorhabditis_elegans', 'Gallus_gallus', 'Gorilla_gorilla', 'Felis_catus',
'Salmo_trutta', 'Arabidopsis_thaliana']),
"description": datasets.Value("string"),
"start_pos": datasets.Value("int32"),
"end_pos": datasets.Value("int32"),
"fasta_url": datasets.Value("string")
}
)
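        # 'species_label' is a ClassLabel, so the species name string yielded by
        # _generate_examples is encoded to its integer class index automatically.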
return datasets.DatasetInfo(
# This defines the different columns of the dataset and their types
features=features,
)
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
urls_filepath = dl_manager.download_and_extract('ten_species_urls.csv')
        with open(urls_filepath) as urls_file:
            # each line of the CSV: species name in the first field, FASTA URL in the last field
            rows = [line.rstrip().split(',') for line in urls_file]
        all_species = tuple(row[0] for row in rows)
        urls = [row[-1] for row in rows]
downloaded_files = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"all_species": all_species, "files": downloaded_files,
"chunk_length": self.config.chunk_length, "overlap": self.config.overlap}
),
]
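
    # Note: _split_generators above exposes the whole corpus as a single 'train' split;
    # no validation or test split is generated by this script.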
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, all_species, files, chunk_length, overlap):
key = 0
for species, file in zip(all_species, files):
with open(file, 'rt') as f:
fasta_sequences = SeqIO.parse(f, 'fasta')
for record in fasta_sequences:
# parse descriptions in the fasta file
sequence, description = str(record.seq), record.description
# clean chromosome sequence
sequence = clean_sequence(sequence)
seq_length = len(sequence)
# split into chunks
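                    # each chunk is chunk_length + 2 * overlap bp long and consecutive chunks
                    # share 2 * overlap bp, so that many bp are set aside before counting
                    # how many full chunks fit in the sequence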
num_chunks = (seq_length - 2 * overlap) // chunk_length
if num_chunks < 1:
continue
sequence = sequence[:(chunk_length * num_chunks + 2 * overlap)]
seq_length = len(sequence)
for i in range(num_chunks):
# get chunk
start_pos = i * chunk_length
end_pos = min(seq_length, (i+1) * chunk_length + 2 * overlap)
chunk_sequence = sequence[start_pos:end_pos]
# yield chunk
                        yield key, {
                            'sequence': chunk_sequence,
                            'description': description,
                            'species_label': species,
                            'start_pos': start_pos,
                            'end_pos': end_pos,
                            'fasta_url': file.split('::')[-1]
                        }
key += 1
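

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the loading script).
# It assumes this file is used as a local dataset script and that your
# `datasets` version allows scripts with custom code (trust_remote_code):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("ten_species.py", name="32768bp", split="train",
#                       trust_remote_code=True)
#     example = ds[0]
#     print(example["species_label"], example["start_pos"], example["end_pos"])
# ---------------------------------------------------------------------------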