{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from typing import Dict, List, Union\n", "import os\n", "import json\n", "import s3fs\n", "import datasets\n", "from datasets import load_from_disk, load_dataset\n", "from huggingface_hub import hf_hub_download\n", "import pandas as pd\n", "\n", "s3 = s3fs.S3FileSystem(anon=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def build_rna_seq_dataset(\n", " data: pd.DataFrame, \n", " info: datasets.DatasetInfo = None,\n", " preserve_index: bool = True,\n", "):\n", " return datasets.Dataset.from_pandas(\n", " data,\n", " info=info,\n", " preserve_index=preserve_index,\n", " )\n", " \n", "# ds = build_rna_seq_dataset(breast_df)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class Recount3Info:\n", " _tcga_paths = None\n", " _gtex_paths = None\n", " _sra_paths = None\n", " \n", " def __init__(\n", " self,\n", " storage_options: Dict = {\"anon\": True}\n", " ):\n", " self.fs = s3fs.S3FileSystem(**storage_options)\n", " \n", " @property\n", " def tcga_s3_filepaths(self):\n", " if self._tcga_paths is None:\n", " self._load_subset_index(\"tcga\")\n", " return self._tcga_paths\n", " \n", " @property\n", " def gtex_s3_filepaths(self):\n", " if self._gtex_paths is None:\n", " self._load_subset_index(\"gtex\")\n", " return self._gtex_paths\n", " \n", " @property\n", " def sra_s3_filepaths(self):\n", " if self._sra_paths is None:\n", " self._load_subset_index(\"sra\")\n", " return self._sra_paths\n", " \n", " @property\n", " def tcga_dataset_ids(self):\n", " return list(self.tcga_s3_filepaths.keys())\n", " \n", " @property\n", " def gtex_dataset_ids(self):\n", " return list(self.gtex_s3_filepaths.keys())\n", " \n", " @property\n", " def sra_dataset_ids(self):\n", " return list(self.sra_s3_filepaths.keys())\n", " \n", " def s3_filepath(self, subset: str = \"tcga\", dataset_id: str = \"BRCA\", repo_id: str = \"jarrydmartinx/recount3-RNA-seq\"):\n", " return getattr(self, f\"{subset}_s3_filepaths\")[dataset_id]\n", " \n", " def public_dataset_url(self, subset: str = \"tcga\", dataset_id: str = \"BRCA\", repo_id: str = \"jarrydmartinx/recount3-RNA-seq\"):\n", " filepath = self.s3_filepath(subset=subset, dataset_id=dataset_id, repo_id=repo_id)\n", " return self._s3_public_url(s3_filepath=filepath) \n", " \n", " def _load_subset_index(self, subset: str = \"tcga\", repo_id: str = \"jarrydmartinx/recount3-RNA-seq\"):\n", " path = hf_hub_download(repo_id=repo_id, filename=f\"{subset}_index.json\", repo_type=\"dataset\")\n", " with open(path) as f:\n", " setattr(self, f\"_{subset}_paths\", json.load(f))\n", " \n", " def _s3_public_url(self, s3_filepath: str, bucket_name=\"recount-opendata\"):\n", " url_tail = s3_filepath[17:]\n", " url = f'https://{bucket_name}.s3.amazonaws.com/{url_tail}'\n", " \n", " return url" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# # Dataset loading script\n", "\n", "# # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.\n", "# #\n", "# # Licensed under the Apache License, Version 2.0 (the \"License\");\n", "# # you may not use this file except in compliance with the License.\n", "# # You may obtain a copy of the License at\n", "# #\n", "# # http://www.apache.org/licenses/LICENSE-2.0\n", "# #\n", "# # Unless required by applicable law or agreed to in writing, software\n", "# # 
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
 "# # Dataset loading script\n",
 "\n",
 "# # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.\n",
 "# #\n",
 "# # Licensed under the Apache License, Version 2.0 (the \"License\");\n",
 "# # you may not use this file except in compliance with the License.\n",
 "# # You may obtain a copy of the License at\n",
 "# #\n",
 "# #     http://www.apache.org/licenses/LICENSE-2.0\n",
 "# #\n",
 "# # Unless required by applicable law or agreed to in writing, software\n",
 "# # distributed under the License is distributed on an \"AS IS\" BASIS,\n",
 "# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
 "# # See the License for the specific language governing permissions and\n",
 "# # limitations under the License.\n",
 "# # TODO: Address all TODOs and remove all explanatory comments\n",
 "# \"\"\"TODO: Add a description here.\"\"\"\n",
 "\n",
 "# import datasets\n",
 "# import os\n",
 "# import json\n",
 "# import csv\n",
 "# from datasets import load_from_disk\n",
 "\n",
 "# # TODO: Add BibTeX citation\n",
 "# # Find, for instance, the citation on arXiv or on the dataset repo/website\n",
 "# _CITATION = \"\"\"\\\n",
 "# @InProceedings{huggingface:dataset,\n",
 "#     title = {A great new dataset},\n",
 "#     author = {huggingface, Inc.},\n",
 "#     year = {2023}\n",
 "# }\n",
 "# \"\"\"\n",
 "\n",
 "# # TODO: Add a description of the dataset here\n",
 "# # You can copy an official description\n",
 "# _DESCRIPTION = \"\"\"\\\n",
 "# This new dataset is designed to solve this great NLP task and is crafted with a lot of care.\n",
 "# \"\"\"\n",
 "\n",
 "# # TODO: Add a link to an official homepage for the dataset here\n",
 "# _HOMEPAGE = \"\"\n",
 "\n",
 "# # TODO: Add the licence for the dataset here if you can find it\n",
 "# _LICENSE = \"\"\n",
 "\n",
 "# # TODO: Add links to the official dataset URLs here\n",
 "# # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.\n",
 "# # This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method)\n",
 "# _URLS = {\n",
 "#     \"first_domain\": \"https://huggingface.co/great-new-dataset-first_domain.zip\",\n",
 "#     \"second_domain\": \"https://huggingface.co/great-new-dataset-second_domain.zip\",\n",
 "# }\n",
 "\n",
 "\n",
 "# # TODO: The name of the dataset usually matches the script name, with CamelCase instead of snake_case\n",
 "# class Recount3Dataset(datasets.GeneratorBasedBuilder):\n",
 "#     \"\"\"TODO: Short description of my dataset.\"\"\"\n",
 "\n",
 "#     VERSION = datasets.Version(\"1.1.0\")\n",
 "\n",
 "#     # This is an example of a dataset with multiple configurations.\n",
 "#     # If you don't want/need to define several sub-sets in your dataset,\n",
 "#     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.\n",
 "\n",
 "#     # If you need to make complex sub-parts in the dataset with configurable options,\n",
 "#     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.\n",
 "#     # BUILDER_CONFIG_CLASS = MyBuilderConfig\n",
 "\n",
 "#     # You will be able to load one or the other configuration in the following list with\n",
 "#     # data = datasets.load_dataset('my_dataset', 'first_domain')\n",
 "#     # data = datasets.load_dataset('my_dataset', 'second_domain')\n",
 "#     BUILDER_CONFIGS = [\n",
 "#         datasets.BuilderConfig(name=\"tcga\", version=VERSION, description=\"The TCGA subset of recount3\"),\n",
 "#         datasets.BuilderConfig(name=\"second_domain\", version=VERSION, description=\"This part of my dataset covers a second domain\"),\n",
 "#         datasets.BuilderConfig(name=\"first_domain\", version=VERSION, description=\"This part of my dataset covers a first domain\"),\n",
 "#     ]\n",
 "\n",
 "#     DEFAULT_CONFIG_NAME = \"first_domain\"  # It's not mandatory to have a default configuration. Just use one if it makes sense.\n",
 "\n",
 "#     def _info(self):\n",
 "#         # TODO: This method specifies the datasets.DatasetInfo object, which contains information and typings for the dataset\n",
 "#         if self.config.name == \"first_domain\":  # This is the name of the configuration selected in BUILDER_CONFIGS above\n",
 "#             features = datasets.Features(\n",
 "#                 {\n",
 "#                     \"sentence\": datasets.Value(\"string\"),\n",
 "#                     \"option1\": datasets.Value(\"string\"),\n",
 "#                     \"answer\": datasets.Value(\"string\"),\n",
 "#                     # These are the features of your dataset, like images, labels ...\n",
 "#                 }\n",
 "#             )\n",
 "#         else:  # This is an example of how to have different features for \"first_domain\" and \"second_domain\"\n",
 "#             features = datasets.Features(\n",
 "#                 {\n",
 "#                     \"sentence\": datasets.Value(\"string\"),\n",
 "#                     \"option2\": datasets.Value(\"string\"),\n",
 "#                     \"second_domain_answer\": datasets.Value(\"string\"),\n",
 "#                     # These are the features of your dataset, like images, labels ...\n",
 "#                 }\n",
 "#             )\n",
 "#         return datasets.DatasetInfo(\n",
 "#             # This is the description that will appear on the datasets page.\n",
 "#             description=_DESCRIPTION,\n",
 "#             # This defines the different columns of the dataset and their types\n",
 "#             features=features,  # We define them above because they differ between the two configurations\n",
 "#             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and\n",
 "#             # specify them. They'll be used if as_supervised=True in builder.as_dataset.\n",
 "#             # supervised_keys=(\"sentence\", \"label\"),\n",
 "#             # Homepage of the dataset for documentation\n",
 "#             homepage=_HOMEPAGE,\n",
 "#             # License for the dataset if available\n",
 "#             license=_LICENSE,\n",
 "#             # Citation for the dataset\n",
 "#             citation=_CITATION,\n",
 "#         )\n",
 "\n",
 "#     def _split_generators(self, dl_manager):\n",
 "#         # TODO: This method is tasked with downloading/extracting the data and defining the splits, depending on the configuration.\n",
 "#         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name\n",
 "\n",
 "#         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.\n",
 "#         # It can accept any type of nested list/dict and will give back the same structure with the URLs replaced with paths to local files.\n",
 "#         # By default, archives are extracted and a path to the cached folder where they were extracted is returned instead of the archive.\n",
 "#         urls = _URLS[self.config.name]\n",
 "#         data_dir = dl_manager.download_and_extract(urls)\n",
 "#         return [\n",
 "#             datasets.SplitGenerator(\n",
 "#                 name=datasets.Split.TRAIN,\n",
 "#                 # These kwargs will be passed to _generate_examples\n",
 "#                 gen_kwargs={\n",
 "#                     \"filepath\": os.path.join(data_dir, \"train.jsonl\"),\n",
 "#                     \"split\": \"train\",\n",
 "#                 },\n",
 "#             ),\n",
 "#             datasets.SplitGenerator(\n",
 "#                 name=datasets.Split.VALIDATION,\n",
 "#                 # These kwargs will be passed to _generate_examples\n",
 "#                 gen_kwargs={\n",
 "#                     \"filepath\": os.path.join(data_dir, \"dev.jsonl\"),\n",
 "#                     \"split\": \"dev\",\n",
 "#                 },\n",
 "#             ),\n",
 "#             datasets.SplitGenerator(\n",
 "#                 name=datasets.Split.TEST,\n",
 "#                 # These kwargs will be passed to _generate_examples\n",
 "#                 gen_kwargs={\n",
 "#                     \"filepath\": os.path.join(data_dir, \"test.jsonl\"),\n",
 "#                     \"split\": \"test\",\n",
 "#                 },\n",
 "#             ),\n",
 "#         ]\n",
 "\n",
 "#     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`\n",
 "#     def _generate_examples(self, filepath, split):\n",
 "#         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.\n",
 "#         # The `key` is there for legacy reasons (tfds) and is not important in itself, but it must be unique for each example.\n",
 "#         with open(filepath, encoding=\"utf-8\") as f:\n",
 "#             for key, row in enumerate(f):\n",
 "#                 data = json.loads(row)\n",
 "#                 if self.config.name == \"first_domain\":\n",
 "#                     # Yields examples as (key, example) tuples\n",
 "#                     yield key, {\n",
 "#                         \"sentence\": data[\"sentence\"],\n",
 "#                         \"option1\": data[\"option1\"],\n",
 "#                         \"answer\": \"\" if split == \"test\" else data[\"answer\"],\n",
 "#                     }\n",
 "#                 else:\n",
 "#                     yield key, {\n",
 "#                         \"sentence\": data[\"sentence\"],\n",
 "#                         \"option2\": data[\"option2\"],\n",
 "#                         \"second_domain_answer\": \"\" if split == \"test\" else data[\"second_domain_answer\"],\n",
 "#                     }\n"
 ] }
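, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
 "# Sketch of how the commented-out builder above would be used once its\n",
 "# TODOs are filled in and it is saved as a standalone script. The file\n",
 "# name recount3_dataset.py and the \"tcga\" config name are assumptions\n",
 "# taken from the template's BUILDER_CONFIGS, not an existing script.\n",
 "# data = datasets.load_dataset(\"./recount3_dataset.py\", \"tcga\")\n",
 "# data[\"train\"]"
 ] }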
 ], "metadata": { "kernelspec": { "display_name": "Python 3.10.9 ('recount')", "language": "python", "name": "python3" }, "language_info": { "name": "python", "version": "3.10.9" }, "orig_nbformat": 4, "vscode": { "interpreter": { "hash": "ed875d186ce08f1d09fded8cb618f7757e0d4e9a9ff702057ff3018e00fc337a" } } }, "nbformat": 4, "nbformat_minor": 2 }