"""Hugging Face datasets module for the phytozome_genomes dataset""" __version__ = '0.1.0' import functools from pathlib import Path import sys import typing import Bio.SeqIO import datasets import pandas as pd import requests _CITATION = """\ @article{Goodstein2011, doi = {10.1093/nar/gkr944}, url = {https://doi.org/10.1093/nar/gkr944}, year = {2011}, month = nov, publisher = {Oxford University Press ({OUP})}, volume = {40}, number = {D1}, pages = {D1178--D1186}, author = {David M. Goodstein and Shengqiang Shu and Russell Howson and Rochak Neupane and Richard D. Hayes and Joni Fazo and Therese Mitros and William Dirks and Uffe Hellsten and Nicholas Putnam and Daniel S. Rokhsar}, title = {Phytozome: a comparative platform for green plant genomics}, journal = {Nucleic Acids Research} } """ def create_JGI_session(username: str, password: str) -> requests.Session: session = requests.Session() session.post( url = 'https://signon.jgi.doe.gov/signon/create', data = { 'login': username, 'password': password } ) return session def download_phytozome_file(jamo_id: str, destination: Path, jgi_session: requests.Session, chunk_size: int = (64 * 1024)): with jgi_session.get(f"https://files.jgi.doe.gov/download_files/{jamo_id}", stream = True) as response: if response.status_code == requests.codes.conflict: raise requests.exceptions.HTTPError(f"File '{jamo_id}' is not immediately available on Phytozome and has been requested from the tape archive; please try again in 24 hours.") response.raise_for_status() with open(destination, 'wb') as destination_file: for chunk in response.iter_content(chunk_size = chunk_size): destination_file.write(chunk) class PhytozomeGenomesConfig(datasets.BuilderConfig): def __init__(self, username: str = '', password: str = ''): super().__init__(version = datasets.Version(__version__)) self.username = username self.password = password def create_config_id( self, config_kwargs: dict, custom_features: typing.Optional[datasets.Features] = None ) -> str: config_kwargs_redacted = config_kwargs.copy() # don't include the JGI username or password in cache dir hash config_kwargs_redacted.pop('username', None) config_kwargs_redacted.pop('password', None) return super().create_config_id( config_kwargs = config_kwargs_redacted, custom_features = custom_features ) class PhytozomeGenomes(datasets.GeneratorBasedBuilder): """Dataset of genomes from Phytozome""" BUILDER_CONFIG_CLASS = PhytozomeGenomesConfig def _info(self): return datasets.DatasetInfo( description = self.__class__.__doc__, features = datasets.Features({ "seqid": datasets.Value("string"), "sequence": datasets.Value("string") }), homepage = 'https://phytozome-next.jgi.doe.gov', license = 'See the Data Policy for each genome', citation = _CITATION, ) def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]: genomes_cache_path = dl_manager.download('genomes.tsv') genomes_metadata = pd.read_table( genomes_cache_path, index_col = 'id', header = 0 ) jgi_session = create_JGI_session( username = self.config.username, password = self.config.password ) download_phytozome_file_in_current_session = functools.partial(download_phytozome_file, jgi_session = jgi_session) genome_ids_train = genomes_metadata.iloc[:90].index.to_list() genome_ids_validate = genomes_metadata.iloc[90:95].index.to_list() genome_ids_test = genomes_metadata.iloc[95:].index.to_list() genome_paths_compressed_train = dl_manager.download_custom(genome_ids_train, download_phytozome_file_in_current_session) 
        genome_paths_compressed_validate = dl_manager.download_custom(genome_ids_validate, download_phytozome_file_in_current_session)
        genome_paths_compressed_test = dl_manager.download_custom(genome_ids_test, download_phytozome_file_in_current_session)
        jgi_session.close()

        genome_paths_train = dl_manager.extract(genome_paths_compressed_train)
        genome_paths_validate = dl_manager.extract(genome_paths_compressed_validate)
        genome_paths_test = dl_manager.extract(genome_paths_compressed_test)

        return [
            datasets.SplitGenerator(
                name = datasets.Split.TRAIN,
                gen_kwargs = {
                    "genome_paths": genome_paths_train,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name = datasets.Split.VALIDATION,
                gen_kwargs = {
                    "genome_paths": genome_paths_validate,
                    "split": "validate",
                },
            ),
            datasets.SplitGenerator(
                name = datasets.Split.TEST,
                gen_kwargs = {
                    "genome_paths": genome_paths_test,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, genome_paths: list[str], split: str):
        """Yield one example per FASTA record across all genomes in the split."""
        for genome_path in genome_paths:
            for record in Bio.SeqIO.parse(genome_path, 'fasta'):
                # prefix the record id with its source file so keys stay unique across genomes
                key = f"{genome_path}:{record.id}"
                yield key, {
                    'seqid': record.id,
                    'sequence': str(record.seq)
                }
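

# A minimal usage sketch, not part of the loading script proper. Assumptions: this
# script and its 'genomes.tsv' manifest sit together in a local directory (the path
# below is a placeholder), and the caller supplies real JGI credentials, which are
# forwarded to PhytozomeGenomesConfig as config kwargs and kept out of the cache
# hash by create_config_id above. `trust_remote_code` may be required on recent
# versions of the `datasets` library that still support script-based datasets.
if __name__ == '__main__':
    import getpass

    dataset = datasets.load_dataset(
        path = 'path/to/phytozome_genomes',   # placeholder: directory containing this script
        username = input('JGI username: '),
        password = getpass.getpass('JGI password: '),
        trust_remote_code = True
    )
    print(dataset)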